// croutine.c
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
#include "FreeRTOS.h"
#include "task.h"
#include "croutine.h"
/* Remove the whole file if co-routines are not being used. */
#if( configUSE_CO_ROUTINES != 0 )
/*
* Some kernel aware debuggers require data to be viewed to be global, rather
* than file scope.
*/
#ifdef portREMOVE_STATIC_QUALIFIER
#define static
#endif
/* Lists for ready and blocked co-routines. --------------------*/
static List_t pxReadyCoRoutineLists[ configMAX_CO_ROUTINE_PRIORITIES ]; /*< Prioritised ready co-routines. */
static List_t xDelayedCoRoutineList1; /*< Delayed co-routines. */
static List_t xDelayedCoRoutineList2; /*< Delayed co-routines (two lists are used - one for delays that have overflowed the current tick count). */
static List_t * pxDelayedCoRoutineList; /*< Points to the delayed co-routine list currently being used. */
static List_t * pxOverflowDelayedCoRoutineList; /*< Points to the delayed co-routine list currently being used to hold co-routines that have overflowed the current tick count. */
static List_t xPendingReadyCoRoutineList; /*< Holds co-routines that have been readied by an external event. They cannot be added directly to the ready lists as the ready lists cannot be accessed by interrupts. */
/* Other file private variables. --------------------------------*/
CRCB_t * pxCurrentCoRoutine = NULL;
static UBaseType_t uxTopCoRoutineReadyPriority = 0;
static TickType_t xCoRoutineTickCount = 0, xLastTickCount = 0, xPassedTicks = 0;
/* The initial state of the co-routine when it is created. */
#define corINITIAL_STATE ( 0 )
/*
* Place the co-routine represented by pxCRCB into the appropriate ready queue
* for the priority. It is inserted at the end of the list.
*
* This macro accesses the co-routine ready lists and therefore must not be
* used from within an ISR.
*/
#define prvAddCoRoutineToReadyQueue( pxCRCB ) \
{ \
if( pxCRCB->uxPriority > uxTopCoRoutineReadyPriority ) \
{ \
uxTopCoRoutineReadyPriority = pxCRCB->uxPriority; \
} \
vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ pxCRCB->uxPriority ] ), &( pxCRCB->xGenericListItem ) ); \
}
/*
* Utility to ready all the lists used by the scheduler. This is called
* automatically upon the creation of the first co-routine.
*/
static void prvInitialiseCoRoutineLists( void );
/*
* Co-routines that are readied by an interrupt cannot be placed directly into
* the ready lists (there is no mutual exclusion). Instead they are placed in
the pending ready list in order that they can later be moved to the ready
* list by the co-routine scheduler.
*/
static void prvCheckPendingReadyList( void );
/*
* Function that looks at the list of co-routines that are currently delayed to
* see if any require waking.
*
* Co-routines are stored in the queue in the order of their wake time -
* meaning once one co-routine has been found whose timer has not expired
* we need not look any further down the list.
*/
static void prvCheckDelayedList( void );
/*-----------------------------------------------------------*/
BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex )
{
BaseType_t xReturn;
CRCB_t *pxCoRoutine;
/* Allocate the memory that will store the co-routine control block. */
pxCoRoutine = ( CRCB_t * ) pvPortMalloc( sizeof( CRCB_t ) );
if( pxCoRoutine )
{
/* If pxCurrentCoRoutine is NULL then this is the first co-routine to
be created and the co-routine data structures need initialising. */
if( pxCurrentCoRoutine == NULL )
{
pxCurrentCoRoutine = pxCoRoutine;
prvInitialiseCoRoutineLists();
}
/* Check the priority is within limits. */
if( uxPriority >= configMAX_CO_ROUTINE_PRIORITIES )
{
uxPriority = configMAX_CO_ROUTINE_PRIORITIES - 1;
}
/* Fill out the co-routine control block from the function parameters. */
pxCoRoutine->uxState = corINITIAL_STATE;
pxCoRoutine->uxPriority = uxPriority;
pxCoRoutine->uxIndex = uxIndex;
pxCoRoutine->pxCoRoutineFunction = pxCoRoutineCode;
/* Initialise all the other co-routine control block parameters. */
vListInitialiseItem( &( pxCoRoutine->xGenericListItem ) );
vListInitialiseItem( &( pxCoRoutine->xEventListItem ) );
/* Set the co-routine control block as a link back from the ListItem_t.
This is so we can get back to the containing CRCB from a generic item
in a list. */
listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xGenericListItem ), pxCoRoutine );
listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xEventListItem ), pxCoRoutine );
/* Event lists are always in priority order. */
listSET_LIST_ITEM_VALUE( &( pxCoRoutine->xEventListItem ), ( ( TickType_t ) configMAX_CO_ROUTINE_PRIORITIES - ( TickType_t ) uxPriority ) );
/* Now the co-routine has been initialised it can be added to the ready
list at the correct priority. */
prvAddCoRoutineToReadyQueue( pxCoRoutine );
xReturn = pdPASS;
}
else
{
xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
}
return xReturn;
}
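/*
 * Illustrative sketch (not part of the original file): a minimal co-routine
 * and its creation. The function name, priority and delay below are example
 * values only. A co-routine body must be bracketed by crSTART() and crEND(),
 * and may only block via the crDELAY()/crQUEUE_xxx macros.
 */
#if 0 /* Example only - not compiled. */
static void vExampleCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
/* Co-routines must begin with a call to crSTART(). */
crSTART( xHandle );
for( ;; )
{
/* crDELAY() is the only way a co-routine may delay itself. */
crDELAY( xHandle, 100 );
}
/* Co-routines must end with a call to crEND(). */
crEND();
}

void vCreateExampleCoRoutine( void )
{
if( xCoRoutineCreate( vExampleCoRoutine, 0, 0 ) != pdPASS )
{
/* The co-routine control block could not be allocated. */
}
}
#endif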
/*-----------------------------------------------------------*/
void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList )
{
TickType_t xTimeToWake;
/* Calculate the time to wake - this may overflow but this is
not a problem. */
xTimeToWake = xCoRoutineTickCount + xTicksToDelay;
/* We must remove ourselves from the ready list before adding
ourselves to the blocked list as the same list item is used for
both lists. */
( void ) uxListRemove( ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) );
/* The list item will be inserted in wake time order. */
listSET_LIST_ITEM_VALUE( &( pxCurrentCoRoutine->xGenericListItem ), xTimeToWake );
if( xTimeToWake < xCoRoutineTickCount )
{
/* Wake time has overflowed. Place this item in the
overflow list. */
vListInsert( ( List_t * ) pxOverflowDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) );
}
else
{
/* The wake time has not overflowed, so we can use the
current block list. */
vListInsert( ( List_t * ) pxDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) );
}
if( pxEventList )
{
/* Also add the co-routine to an event list. If this is done then the
function must be called with interrupts disabled. */
vListInsert( pxEventList, &( pxCurrentCoRoutine->xEventListItem ) );
}
}
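/*
 * Worked example of the wrap-around test above, assuming a 16-bit TickType_t
 * for brevity: if xCoRoutineTickCount is 0xFFF0 and xTicksToDelay is 0x0020
 * then xTimeToWake is ( 0xFFF0 + 0x0020 ) = 0x0010 after the unsigned
 * overflow. 0x0010 < 0xFFF0, so the co-routine is placed on the overflow
 * list and is only woken once the tick count has itself wrapped past zero.
 */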
/*-----------------------------------------------------------*/
static void prvCheckPendingReadyList( void )
{
/* Are there any co-routines waiting to get moved to the ready list? These
are co-routines that have been readied by an ISR. The ISR cannot access
the ready lists itself. */
while( listLIST_IS_EMPTY( &xPendingReadyCoRoutineList ) == pdFALSE )
{
CRCB_t *pxUnblockedCRCB;
/* The pending ready list can be accessed by an ISR. */
portDISABLE_INTERRUPTS();
{
pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( (&xPendingReadyCoRoutineList) );
( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) );
}
portENABLE_INTERRUPTS();
( void ) uxListRemove( &( pxUnblockedCRCB->xGenericListItem ) );
prvAddCoRoutineToReadyQueue( pxUnblockedCRCB );
}
}
/*-----------------------------------------------------------*/
static void prvCheckDelayedList( void )
{
CRCB_t *pxCRCB;
xPassedTicks = xTaskGetTickCount() - xLastTickCount;
while( xPassedTicks )
{
xCoRoutineTickCount++;
xPassedTicks--;
/* If the tick count has overflowed we need to swap the delayed lists. */
if( xCoRoutineTickCount == 0 )
{
List_t * pxTemp;
/* Tick count has overflowed so we need to swap the delay lists. If there are
any items in pxDelayedCoRoutineList here then there is an error! */
pxTemp = pxDelayedCoRoutineList;
pxDelayedCoRoutineList = pxOverflowDelayedCoRoutineList;
pxOverflowDelayedCoRoutineList = pxTemp;
}
/* See if this tick has made a timeout expire. */
while( listLIST_IS_EMPTY( pxDelayedCoRoutineList ) == pdFALSE )
{
pxCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedCoRoutineList );
if( xCoRoutineTickCount < listGET_LIST_ITEM_VALUE( &( pxCRCB->xGenericListItem ) ) )
{
/* Timeout not yet expired. */
break;
}
portDISABLE_INTERRUPTS();
{
/* The event could have occurred just before this critical
section. If this is the case then the generic list item will
have been moved to the pending ready list and the following
line is still valid. Also the pvContainer parameter will have
been set to NULL so the following lines are also valid. */
( void ) uxListRemove( &( pxCRCB->xGenericListItem ) );
/* Is the co-routine waiting on an event also? */
if( pxCRCB->xEventListItem.pvContainer )
{
( void ) uxListRemove( &( pxCRCB->xEventListItem ) );
}
}
portENABLE_INTERRUPTS();
prvAddCoRoutineToReadyQueue( pxCRCB );
}
}
xLastTickCount = xCoRoutineTickCount;
}
/*-----------------------------------------------------------*/
void vCoRoutineSchedule( void )
{
/* See if any co-routines readied by events need moving to the ready lists. */
prvCheckPendingReadyList();
/* See if any delayed co-routines have timed out. */
prvCheckDelayedList();
/* Find the highest priority queue that contains ready co-routines. */
while( listLIST_IS_EMPTY( &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ) )
{
if( uxTopCoRoutineReadyPriority == 0 )
{
/* No more co-routines to check. */
return;
}
--uxTopCoRoutineReadyPriority;
}
/* listGET_OWNER_OF_NEXT_ENTRY walks through the list, so the co-routines
of the same priority get an equal share of the processor time. */
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentCoRoutine, &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) );
/* Call the co-routine. */
( pxCurrentCoRoutine->pxCoRoutineFunction )( pxCurrentCoRoutine, pxCurrentCoRoutine->uxIndex );
return;
}
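/*
 * Illustrative sketch (example only): vCoRoutineSchedule() must be called
 * repeatedly for co-routines to run - conventionally from the idle task
 * hook, so co-routines execute whenever no task is able to run.
 */
#if 0
void vApplicationIdleHook( void )
{
vCoRoutineSchedule();
}
#endif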
/*-----------------------------------------------------------*/
static void prvInitialiseCoRoutineLists( void )
{
UBaseType_t uxPriority;
for( uxPriority = 0; uxPriority < configMAX_CO_ROUTINE_PRIORITIES; uxPriority++ )
{
vListInitialise( ( List_t * ) &( pxReadyCoRoutineLists[ uxPriority ] ) );
}
vListInitialise( ( List_t * ) &xDelayedCoRoutineList1 );
vListInitialise( ( List_t * ) &xDelayedCoRoutineList2 );
vListInitialise( ( List_t * ) &xPendingReadyCoRoutineList );
/* Start with pxDelayedCoRoutineList using list1 and the
pxOverflowDelayedCoRoutineList using list2. */
pxDelayedCoRoutineList = &xDelayedCoRoutineList1;
pxOverflowDelayedCoRoutineList = &xDelayedCoRoutineList2;
}
/*-----------------------------------------------------------*/
BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList )
{
CRCB_t *pxUnblockedCRCB;
BaseType_t xReturn;
/* This function is called from within an interrupt. It can only access
event lists and the pending ready list. This function assumes that a
check has already been made to ensure pxEventList is not empty. */
pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) );
vListInsertEnd( ( List_t * ) &( xPendingReadyCoRoutineList ), &( pxUnblockedCRCB->xEventListItem ) );
if( pxUnblockedCRCB->uxPriority >= pxCurrentCoRoutine->uxPriority )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
}
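/*
 * Illustrative sketch (example only): applications do not call
 * xCoRoutineRemoveFromEventList() directly - it is reached through the
 * crQUEUE_SEND_FROM_ISR()/crQUEUE_RECEIVE_FROM_ISR() macros. The queue
 * handle and ISR name below are hypothetical.
 */
#if 0
extern QueueHandle_t xCRQueue; /* Hypothetical queue read by a co-routine. */

void vExampleRxISR( void )
{
BaseType_t xCoRoutineWoken = pdFALSE;
char cByte = 0;
/* Returns pdTRUE once a blocked co-routine has been moved to the pending
ready list, in which case the application may elect to yield. */
xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCRQueue, &cByte, xCoRoutineWoken );
}
#endif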
#endif /* configUSE_CO_ROUTINES != 0 */
// event_groups.c
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
/* Standard includes. */
#include <stdlib.h>
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers. That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"
#include "event_groups.h"
/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* The following bit fields convey control information in a task's event list
item value. It is important they don't clash with the
taskEVENT_LIST_ITEM_VALUE_IN_USE definition. */
#if configUSE_16_BIT_TICKS == 1
#define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100U
#define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200U
#define eventWAIT_FOR_ALL_BITS 0x0400U
#define eventEVENT_BITS_CONTROL_BYTES 0xff00U
#else
#define eventCLEAR_EVENTS_ON_EXIT_BIT 0x01000000UL
#define eventUNBLOCKED_DUE_TO_BIT_SET 0x02000000UL
#define eventWAIT_FOR_ALL_BITS 0x04000000UL
#define eventEVENT_BITS_CONTROL_BYTES 0xff000000UL
#endif
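/*
 * Worked example of the partitioning above: with 32-bit ticks an EventBits_t
 * carries 24 usable event bits (0x00FFFFFF) and the kernel reserves the top
 * byte (0xFF000000) for the three control bits defined above; with 16-bit
 * ticks only the low byte (0x00FF) is available to the application. This is
 * what the configASSERT() checks against eventEVENT_BITS_CONTROL_BYTES in
 * the functions below enforce.
 */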
typedef struct xEventGroupDefinition
{
EventBits_t uxEventBits;
List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */
#if( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxEventGroupNumber;
#endif
#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif
} EventGroup_t;
/*-----------------------------------------------------------*/
/*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met.
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
* pdTRUE then the wait condition is met if all the bits set in uxBitsToWaitFor
* are also set in uxCurrentEventBits. If xWaitForAllBits is pdFALSE then the
* wait condition is met if any of the bits set in uxBitsToWaitFor are also set
* in uxCurrentEventBits.
*/
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer )
{
EventGroup_t *pxEventBits;
/* A StaticEventGroup_t object must be provided. */
configASSERT( pxEventGroupBuffer );
/* The user has provided a statically allocated event group - use it. */
pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer; /*lint !e740 EventGroup_t and StaticEventGroup_t are guaranteed to have the same size and alignment requirement - checked by configASSERT(). */
if( pxEventBits != NULL )
{
pxEventBits->uxEventBits = 0;
vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
{
/* Both static and dynamic allocation can be used, so note that
this event group was created statically in case the event group
is later deleted. */
pxEventBits->ucStaticallyAllocated = pdTRUE;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
traceEVENT_GROUP_CREATE( pxEventBits );
}
else
{
traceEVENT_GROUP_CREATE_FAILED();
}
return ( EventGroupHandle_t ) pxEventBits;
}
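/*
 * Illustrative sketch (example only): creating an event group from a
 * caller-supplied StaticEventGroup_t so that no heap allocation occurs.
 */
#if 0
static StaticEventGroup_t xCreatedEventGroupBuffer;

void vCreateEventGroupStatically( void )
{
EventGroupHandle_t xEventGroup;
/* The event group's data structures are placed in xCreatedEventGroupBuffer
rather than on the FreeRTOS heap. */
xEventGroup = xEventGroupCreateStatic( &xCreatedEventGroupBuffer );
configASSERT( xEventGroup );
}
#endif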
#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
EventGroupHandle_t xEventGroupCreate( void )
{
EventGroup_t *pxEventBits;
/* Allocate the event group. */
pxEventBits = ( EventGroup_t * ) pvPortMalloc( sizeof( EventGroup_t ) );
if( pxEventBits != NULL )
{
pxEventBits->uxEventBits = 0;
vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
{
/* Both static and dynamic allocation can be used, so note this
event group was allocated dynamically in case the event group is
later deleted. */
pxEventBits->ucStaticallyAllocated = pdFALSE;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
traceEVENT_GROUP_CREATE( pxEventBits );
}
else
{
traceEVENT_GROUP_CREATE_FAILED();
}
return ( EventGroupHandle_t ) pxEventBits;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait )
{
EventBits_t uxOriginalBitValue, uxReturn;
EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
BaseType_t xAlreadyYielded;
BaseType_t xTimeoutOccurred = pdFALSE;
configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
configASSERT( uxBitsToWaitFor != 0 );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
vTaskSuspendAll();
{
uxOriginalBitValue = pxEventBits->uxEventBits;
( void ) xEventGroupSetBits( xEventGroup, uxBitsToSet );
if( ( ( uxOriginalBitValue | uxBitsToSet ) & uxBitsToWaitFor ) == uxBitsToWaitFor )
{
/* All the rendezvous bits are now set - no need to block. */
uxReturn = ( uxOriginalBitValue | uxBitsToSet );
/* A rendezvous always clears the bits. They will have been cleared
already unless this is the only task in the rendezvous. */
pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
xTicksToWait = 0;
}
else
{
if( xTicksToWait != ( TickType_t ) 0 )
{
traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor );
/* Store the bits that the calling task is waiting for in the
task's event list item so the kernel knows when a match is
found. Then enter the blocked state. */
vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | eventCLEAR_EVENTS_ON_EXIT_BIT | eventWAIT_FOR_ALL_BITS ), xTicksToWait );
/* This assignment is obsolete as uxReturn will get set after
the task unblocks, but some compilers mistakenly generate a
warning about uxReturn being returned without being set if the
assignment is omitted. */
uxReturn = 0;
}
else
{
/* The rendezvous bits were not set, but no block time was
specified - just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
}
}
}
xAlreadyYielded = xTaskResumeAll();
if( xTicksToWait != ( TickType_t ) 0 )
{
if( xAlreadyYielded == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The task blocked to wait for its required bits to be set - at this
point either the required bits were set or the block time expired. If
the required bits were set they will have been stored in the task's
event list item, and they should now be retrieved then cleared. */
uxReturn = uxTaskResetEventItemValue();
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
/* The task timed out, just return the current event bit value. */
taskENTER_CRITICAL();
{
uxReturn = pxEventBits->uxEventBits;
/* Although the task got here because it timed out before the
bits it was waiting for were set, it is possible that since it
unblocked another task has set the bits. If this is the case
then it needs to clear the bits before exiting. */
if( ( uxReturn & uxBitsToWaitFor ) == uxBitsToWaitFor )
{
pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
xTimeoutOccurred = pdTRUE;
}
else
{
/* The task unblocked because the bits were set. */
}
/* Control bits that might have been set while the task was blocked
should not be returned. */
uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
}
traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred );
return uxReturn;
}
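/*
 * Illustrative sketch (example only): using xEventGroupSync() as a two-task
 * rendezvous. Each task sets its own bit then waits for both bits; the bit
 * definitions and handle below are hypothetical.
 */
#if 0
#define ebTASK_0_SYNC_BIT ( 1UL << 0 )
#define ebTASK_1_SYNC_BIT ( 1UL << 1 )
#define ebALL_SYNC_BITS ( ebTASK_0_SYNC_BIT | ebTASK_1_SYNC_BIT )

void vTask0SyncPoint( EventGroupHandle_t xSyncEventGroup )
{
/* Sets ebTASK_0_SYNC_BIT then blocks until both bits are set, at which
point all the rendezvous bits are cleared atomically. */
( void ) xEventGroupSync( xSyncEventGroup, ebTASK_0_SYNC_BIT, ebALL_SYNC_BITS, portMAX_DELAY );
}
#endif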
/*-----------------------------------------------------------*/
EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait )
{
EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
EventBits_t uxReturn, uxControlBits = 0;
BaseType_t xWaitConditionMet, xAlreadyYielded;
BaseType_t xTimeoutOccurred = pdFALSE;
/* Check the user is not attempting to wait on the bits used by the kernel
itself, and that at least one bit is being requested. */
configASSERT( xEventGroup );
configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
configASSERT( uxBitsToWaitFor != 0 );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
vTaskSuspendAll();
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
/* Check to see if the wait condition is already met or not. */
xWaitConditionMet = prvTestWaitCondition( uxCurrentEventBits, uxBitsToWaitFor, xWaitForAllBits );
if( xWaitConditionMet != pdFALSE )
{
/* The wait condition has already been met so there is no need to
block. */
uxReturn = uxCurrentEventBits;
xTicksToWait = ( TickType_t ) 0;
/* Clear the wait bits if requested to do so. */
if( xClearOnExit != pdFALSE )
{
pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xTicksToWait == ( TickType_t ) 0 )
{
/* The wait condition has not been met, but no block time was
specified, so just return the current value. */
uxReturn = uxCurrentEventBits;
}
else
{
/* The task is going to block to wait for its required bits to be
set. uxControlBits are used to remember the specified behaviour of
this call to xEventGroupWaitBits() - for use when the event bits
unblock the task. */
if( xClearOnExit != pdFALSE )
{
uxControlBits |= eventCLEAR_EVENTS_ON_EXIT_BIT;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xWaitForAllBits != pdFALSE )
{
uxControlBits |= eventWAIT_FOR_ALL_BITS;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Store the bits that the calling task is waiting for in the
task's event list item so the kernel knows when a match is
found. Then enter the blocked state. */
vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | uxControlBits ), xTicksToWait );
/* This is obsolete as it will get set after the task unblocks, but
some compilers mistakenly generate a warning about the variable
being returned without being set if it is not done. */
uxReturn = 0;
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
xAlreadyYielded = xTaskResumeAll();
if( xTicksToWait != ( TickType_t ) 0 )
{
if( xAlreadyYielded == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The task blocked to wait for its required bits to be set - at this
point either the required bits were set or the block time expired. If
the required bits were set they will have been stored in the task's
event list item, and they should now be retrieved then cleared. */
uxReturn = uxTaskResetEventItemValue();
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
taskENTER_CRITICAL();
{
/* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
/* It is possible that the event bits were updated between this
task leaving the Blocked state and running again. */
if( prvTestWaitCondition( uxReturn, uxBitsToWaitFor, xWaitForAllBits ) != pdFALSE )
{
if( xClearOnExit != pdFALSE )
{
pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
/* The task timed out - record that for the trace macro below. */
xTimeoutOccurred = pdTRUE;
}
else
{
/* The task unblocked because the bits were set. */
}
/* The task blocked so control bits may have been set. */
uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
}
traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred );
return uxReturn;
}
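/*
 * Illustrative sketch (example only): blocking on an event group for either
 * of two bits, clearing whichever bits caused the unblock.
 */
#if 0
void vWaitForEitherBit( EventGroupHandle_t xEventGroup )
{
EventBits_t uxBits;
/* Wait a maximum of 100 ticks for bit 0 or bit 4 to become set. */
uxBits = xEventGroupWaitBits( xEventGroup,
( 1UL << 0 ) | ( 1UL << 4 ), /* uxBitsToWaitFor. */
pdTRUE, /* xClearOnExit. */
pdFALSE, /* xWaitForAllBits. */
100 );
if( ( uxBits & ( ( 1UL << 0 ) | ( 1UL << 4 ) ) ) == ( EventBits_t ) 0 )
{
/* The call timed out before either bit was set. */
}
}
#endif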
/*-----------------------------------------------------------*/
EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear )
{
EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
EventBits_t uxReturn;
/* Check the user is not attempting to clear the bits used by the kernel
itself. */
configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
taskENTER_CRITICAL();
{
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
/* The value returned is the event group value prior to the bits being
cleared. */
uxReturn = pxEventBits->uxEventBits;
/* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
taskEXIT_CRITICAL();
return uxReturn;
}
/*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear )
{
BaseType_t xReturn;
traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear );
xReturn = xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL );
return xReturn;
}
#endif
/*-----------------------------------------------------------*/
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{
UBaseType_t uxSavedInterruptStatus;
EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
EventBits_t uxReturn;
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
uxReturn = pxEventBits->uxEventBits;
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return uxReturn;
}
/*-----------------------------------------------------------*/
EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet )
{
ListItem_t *pxListItem, *pxNext;
ListItem_t const *pxListEnd;
List_t *pxList;
EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits;
EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
BaseType_t xMatchFound = pdFALSE;
/* Check the user is not attempting to set the bits used by the kernel
itself. */
configASSERT( xEventGroup );
configASSERT( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */
vTaskSuspendAll();
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
pxListItem = listGET_HEAD_ENTRY( pxList );
/* Set the bits. */
pxEventBits->uxEventBits |= uxBitsToSet;
/* See if the new bit value should unblock any tasks. */
while( pxListItem != pxListEnd )
{
pxNext = listGET_NEXT( pxListItem );
uxBitsWaitedFor = listGET_LIST_ITEM_VALUE( pxListItem );
xMatchFound = pdFALSE;
/* Split the bits waited for from the control bits. */
uxControlBits = uxBitsWaitedFor & eventEVENT_BITS_CONTROL_BYTES;
uxBitsWaitedFor &= ~eventEVENT_BITS_CONTROL_BYTES;
if( ( uxControlBits & eventWAIT_FOR_ALL_BITS ) == ( EventBits_t ) 0 )
{
/* Just looking for single bit being set. */
if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) != ( EventBits_t ) 0 )
{
xMatchFound = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor )
{
/* All bits are set. */
xMatchFound = pdTRUE;
}
else
{
/* Need all bits to be set, but not all the bits were set. */
}
if( xMatchFound != pdFALSE )
{
/* The bits match. Should the bits be cleared on exit? */
if( ( uxControlBits & eventCLEAR_EVENTS_ON_EXIT_BIT ) != ( EventBits_t ) 0 )
{
uxBitsToClear |= uxBitsWaitedFor;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Store the actual event flag value in the task's event list
item before removing the task from the event list. The
eventUNBLOCKED_DUE_TO_BIT_SET bit is set so the task knows
that it was unblocked due to its required bits matching, rather
than because it timed out. */
( void ) xTaskRemoveFromUnorderedEventList( pxListItem, pxEventBits->uxEventBits | eventUNBLOCKED_DUE_TO_BIT_SET );
}
/* Move onto the next list item. Note pxListItem->pxNext is not
used here as the list item may have been removed from the event list
and inserted into the ready/pending ready list. */
pxListItem = pxNext;
}
/* Clear any bits that matched when the eventCLEAR_EVENTS_ON_EXIT_BIT
bit was set in the control word. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
( void ) xTaskResumeAll();
return pxEventBits->uxEventBits;
}
/*-----------------------------------------------------------*/
void vEventGroupDelete( EventGroupHandle_t xEventGroup )
{
EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
vTaskSuspendAll();
{
traceEVENT_GROUP_DELETE( xEventGroup );
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{
/* Unblock the task, returning 0 as the event list is being deleted
and cannot therefore have any bits set. */
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
( void ) xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
/* The event group can only have been allocated dynamically - free
it again. */
vPortFree( pxEventBits );
}
#elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
{
/* The event group could have been allocated statically or
dynamically, so check before attempting to free the memory. */
if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{
vPortFree( pxEventBits );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
( void ) xTaskResumeAll();
}
/*-----------------------------------------------------------*/
/* For internal use only - execute a 'set bits' command that was pended from
an interrupt. */
void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet )
{
( void ) xEventGroupSetBits( pvEventGroup, ( EventBits_t ) ulBitsToSet );
}
/*-----------------------------------------------------------*/
/* For internal use only - execute a 'clear bits' command that was pended from
an interrupt. */
void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear )
{
( void ) xEventGroupClearBits( pvEventGroup, ( EventBits_t ) ulBitsToClear );
}
/*-----------------------------------------------------------*/
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits )
{
BaseType_t xWaitConditionMet = pdFALSE;
if( xWaitForAllBits == pdFALSE )
{
/* Task only has to wait for one bit within uxBitsToWaitFor to be
set. Is one already set? */
if( ( uxCurrentEventBits & uxBitsToWaitFor ) != ( EventBits_t ) 0 )
{
xWaitConditionMet = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Task has to wait for all the bits in uxBitsToWaitFor to be set.
Are they set already? */
if( ( uxCurrentEventBits & uxBitsToWaitFor ) == uxBitsToWaitFor )
{
xWaitConditionMet = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
return xWaitConditionMet;
}
/*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )
BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet );
xReturn = xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken );
return xReturn;
}
#endif
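/*
 * Illustrative sketch (example only): setting bits from an ISR. The set is
 * deferred to the timer/daemon task via the pend-function mechanism, so the
 * return value must be checked. xExampleEventGroup is a hypothetical handle.
 */
#if 0
extern EventGroupHandle_t xExampleEventGroup;

void vExampleInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
if( xEventGroupSetBitsFromISR( xExampleEventGroup, ( 1UL << 0 ), &xHigherPriorityTaskWoken ) != pdFAIL )
{
/* Request a context switch if posting to the timer command queue
unblocked a higher priority task. */
portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
}
#endif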
/*-----------------------------------------------------------*/
#if( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxEventGroupGetNumber( void* xEventGroup )
{
UBaseType_t xReturn;
EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
if( xEventGroup == NULL )
{
xReturn = 0;
}
else
{
xReturn = pxEventBits->uxEventGroupNumber;
}
return xReturn;
}
#endif
// FreeRTOSCommonHooks.c
/*
File Name : FreeRTOSCommonHooks.c
Author : Yichip
Version : V1.0
Date : 2021/8/16
Description : FreeRTOS Common Hooks encapsulation.
*/
#include "FreeRTOS.h"
#include "task.h"
#include "system.h"
void vApplicationIdleHook( void )
{
/* vApplicationIdleHook() will only be called if configUSE_IDLE_HOOK is set
to 1 in FreeRTOSConfig.h. It will be called on each iteration of the idle
task. It is essential that code added to this hook function never attempts
to block in any way (for example, call xQueueReceive() with a block time
specified, or call vTaskDelay()). If the application makes use of the
vTaskDelete() API function (as this demo application does) then it is also
important that vApplicationIdleHook() is permitted to return to its calling
function, because it is the responsibility of the idle task to clean up
memory allocated by the kernel to any task that has since been deleted. */
asm("wfi");
}
__attribute__((noreturn)) void vApplicationStackOverflowHook( xTaskHandle pxTask, char *pcTaskName )
{
( void ) pcTaskName;
( void ) pxTask;
/* Run time stack overflow checking is performed if
configCHECK_FOR_STACK_OVERFLOW is defined to 1 or 2. This hook
function is called if a stack overflow is detected. */
taskDISABLE_INTERRUPTS();
MyPrintf("*********************\n");
MyPrintf("* Stack Overflow *\n");
MyPrintf("*********************\n");
MyPrintf("* *\n");
MyPrintf("* Task Name: *\n");
MyPrintf("* %-17s *\n", pcTaskName);
MyPrintf("* *\n");
MyPrintf("*********************\n");
while (1)
;
}
__attribute__((noreturn)) void vApplicationMallocFailedHook( size_t xWantedSize )
{
/* Called if a call to pvPortMalloc() fails because there is insufficient
free memory available in the FreeRTOS heap. pvPortMalloc() is called
internally by FreeRTOS API functions that create tasks, queues, software
timers, and semaphores. The size of the FreeRTOS heap is set by the
configTOTAL_HEAP_SIZE configuration constant in FreeRTOSConfig.h. */
taskDISABLE_INTERRUPTS();
MyPrintf("*********************\n");
MyPrintf("* Malloc failed *\n");
MyPrintf("*********************\n");
MyPrintf("* *\n");
MyPrintf("* Size: %-10d *\n", xWantedSize);
MyPrintf("* *\n");
MyPrintf("*********************\n");
while (1)
;
}
void vApplicationTickHook( void )
{
/* This function will be called by each tick interrupt if
configUSE_TICK_HOOK is set to 1 in FreeRTOSConfig.h. User code can be
added here, but the tick hook is called from an interrupt context, so
code must not attempt to block, and only the interrupt safe FreeRTOS API
functions can be used (those that end in FromISR()). */
}
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
/* configSUPPORT_STATIC_ALLOCATION is set to 1, so the application must provide an
implementation of vApplicationGetIdleTaskMemory() to provide the memory that is
used by the Idle task. */
void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize )
{
/* If the buffers to be provided to the Idle task are declared inside this
function then they must be declared static - otherwise they will be allocated on
the stack and would not exist after this function exits. */
static StaticTask_t xIdleTaskTCB;
static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];
/* Pass out a pointer to the StaticTask_t structure in which the Idle task's
state will be stored. */
*ppxIdleTaskTCBBuffer = &xIdleTaskTCB;
/* Pass out the array that will be used as the Idle task's stack. */
*ppxIdleTaskStackBuffer = uxIdleTaskStack;
/* Pass out the size of the array pointed to by *ppxIdleTaskStackBuffer.
Note that, as the array is necessarily of type StackType_t,
configMINIMAL_STACK_SIZE is specified in words, not bytes. */
*pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
}
/* configSUPPORT_STATIC_ALLOCATION and configUSE_TIMERS are both set to 1, so the
application must provide an implementation of vApplicationGetTimerTaskMemory()
to provide the memory that is used by the Timer service task. */
void vApplicationGetTimerTaskMemory( StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize )
{
/* If the buffers to be provided to the Timer task are declared inside this
function then they must be declared static - otherwise they will be allocated on
the stack and would not exist after this function exits. */
static StaticTask_t xTimerTaskTCB;
static StackType_t uxTimerTaskStack[ configTIMER_TASK_STACK_DEPTH ];
/* Pass out a pointer to the StaticTask_t structure in which the Timer
task's state will be stored. */
*ppxTimerTaskTCBBuffer = &xTimerTaskTCB;
/* Pass out the array that will be used as the Timer task's stack. */
*ppxTimerTaskStackBuffer = uxTimerTaskStack;
/* Pass out the size of the array pointed to by *ppxTimerTaskStackBuffer.
Note that, as the array is necessarily of type StackType_t,
configTIMER_TASK_STACK_DEPTH is specified in words, not bytes. */
*pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
}
#endif
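/*
 * Illustrative sketch (example only): with the hooks above in place an
 * application task can be created statically too. The task function below
 * is hypothetical; it requires configSUPPORT_STATIC_ALLOCATION == 1.
 */
#if 0
extern void vAppTaskFunction( void *pvParameters );
static StaticTask_t xAppTaskTCB;
static StackType_t uxAppTaskStack[ configMINIMAL_STACK_SIZE ];

void vCreateAppTaskStatically( void )
{
/* The stack depth is given in words to match the buffer above, and the
task handle is returned directly rather than through a pointer. */
( void ) xTaskCreateStatic( vAppTaskFunction, "App", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, uxAppTaskStack, &xAppTaskTCB );
}
#endif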
// FreeRTOSConfig.h
/*
* FreeRTOS Kernel V10.0.1
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
#ifndef FREERTOS_CONFIG_H
#define FREERTOS_CONFIG_H
#include "yc3121.h"
#include "platform.h"
/*-----------------------------------------------------------
* Application specific definitions.
*
* These definitions should be adjusted for your particular hardware and
* application requirements.
*
* THESE PARAMETERS ARE DESCRIBED WITHIN THE 'CONFIGURATION' SECTION OF THE
* FreeRTOS API DOCUMENTATION AVAILABLE ON THE FreeRTOS.org WEB SITE.
*
* See http://www.freertos.org/a00110.html.
*----------------------------------------------------------*/
#define configUSE_PREEMPTION 1
#define configUSE_IDLE_HOOK 1
#define configUSE_TICK_HOOK 0
#define configCPU_CLOCK_HZ ( CPU_MHZ )
#define configTICK_RATE_HZ ( ( TickType_t ) TICK_RATE_HZ )
#define configMAX_PRIORITIES ( 5 )
#define configMINIMAL_STACK_SIZE ( ( unsigned short ) 128 )
#define configTOTAL_HEAP_SIZE ( ( size_t ) ( 15 * 1024 ) )
#define configMAX_TASK_NAME_LEN ( 8 )
#define configUSE_TRACE_FACILITY 0
#define configUSE_16_BIT_TICKS 0
#define configIDLE_SHOULD_YIELD 1
#define configUSE_MUTEXES 1
#define configCHECK_FOR_STACK_OVERFLOW 1
#define configGENERATE_RUN_TIME_STATS 0
#define configUSE_RECURSIVE_MUTEXES 1
#define configQUEUE_REGISTRY_SIZE 0
#define configUSE_COUNTING_SEMAPHORES 0
#define configUSE_MALLOC_FAILED_HOOK 1
#define configUSE_APPLICATION_TASK_TAG 0
#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0
#define configUSE_TASK_EXTENSION 0
/* Co-routine definitions. */
#define configUSE_CO_ROUTINES 0
#define configMAX_CO_ROUTINE_PRIORITIES (2)
/* Software timer definitions. */
#define configUSE_TIMERS 1
#define configTIMER_TASK_PRIORITY ( configMAX_PRIORITIES - 1 )
#define configTIMER_QUEUE_LENGTH 5
#define configTIMER_TASK_STACK_DEPTH ( configMINIMAL_STACK_SIZE * 5 )
/* Set the following definitions to 1 to include the API function, or zero
to exclude the API function. */
#define INCLUDE_vTaskPrioritySet 1
#define INCLUDE_uxTaskPriorityGet 1
#define INCLUDE_vTaskDelete 1
#define INCLUDE_vTaskCleanUpResources 0
#define INCLUDE_vTaskSuspend 1
#define INCLUDE_vTaskDelayUntil 1
#define INCLUDE_vTaskDelay 1
/* This demo makes use of one or more example stats formatting functions. These
format the raw data provided by the uxTaskGetSystemState() function into human
readable ASCII form. See the notes in the implementation of vTaskList() within
FreeRTOS/Source/tasks.c for limitations. */
#define configUSE_STATS_FORMATTING_FUNCTIONS 0
/* Enable queue sets. */
#define configUSE_QUEUE_SETS 1
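/*
 * Illustrative sketch (example only): configUSE_QUEUE_SETS == 1 enables the
 * queue set API, which lets one task block on several queues or semaphores
 * at once. The queue handles below are hypothetical and must be empty when
 * added to the set.
 */
#if 0
extern QueueHandle_t xQueue1, xQueue2;

void vQueueSetReaderTask( void *pvParameters )
{
QueueSetHandle_t xQueueSet;
QueueSetMemberHandle_t xActivatedMember;
/* The set must be able to hold one event for every item of every member. */
xQueueSet = xQueueCreateSet( 10 );
( void ) xQueueAddToSet( xQueue1, xQueueSet );
( void ) xQueueAddToSet( xQueue2, xQueueSet );
for( ;; )
{
/* Block until one of the member queues contains data. */
xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );
if( xActivatedMember == ( QueueSetMemberHandle_t ) xQueue1 )
{
/* Read from xQueue1 with a block time of zero. */
}
}
}
#endif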
#endif /* FREERTOS_CONFIG_H */
// list.c
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
***************************************************************************
>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<
***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that is more than just the market leader, it *
* is the industry's de facto standard. *
* *
* Help yourself get started quickly while simultaneously helping *
* to support the FreeRTOS project by purchasing a FreeRTOS *
* tutorial book, reference manual, or both: *
* http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
the FAQ page "My application does not run, what could be wrong?". Have you
defined configASSERT()?
http://www.FreeRTOS.org/support - In return for receiving this top quality
embedded software for free we request you assist our global community by
participating in the support forum.
http://www.FreeRTOS.org/training - Investing in training allows your team to
be as productive as possible as early as possible. Now you can receive
FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.
1 tab == 4 spaces!
*/
#include <stdlib.h>
#include "FreeRTOS.h"
#include "list.h"
/*-----------------------------------------------------------
* PUBLIC LIST API documented in list.h
*----------------------------------------------------------*/
void vListInitialise( List_t * const pxList )
{
/* The list structure contains a list item which is used to mark the
end of the list. To initialise the list the list end is inserted
as the only list entry. */
pxList->pxIndex = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */
/* The list end value is the highest possible value in the list to
ensure it remains at the end of the list. */
pxList->xListEnd.xItemValue = portMAX_DELAY;
/* The list end next and previous pointers point to itself so we know
when the list is empty. */
pxList->xListEnd.pxNext = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */
pxList->xListEnd.pxPrevious = ( ListItem_t * ) &( pxList->xListEnd );/*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */
pxList->uxNumberOfItems = ( UBaseType_t ) 0U;
/* Write known values into the list if
configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList );
listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList );
}
/*-----------------------------------------------------------*/
void vListInitialiseItem( ListItem_t * const pxItem )
{
/* Make sure the list item is not recorded as being on a list. */
pxItem->pvContainer = NULL;
/* Write known values into the list item if
configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem );
listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem );
}
/*-----------------------------------------------------------*/
void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem )
{
ListItem_t * const pxIndex = pxList->pxIndex;
/* Only effective when configASSERT() is also defined, these tests may catch
the list data structures being overwritten in memory. They will not catch
data errors caused by incorrect configuration or use of FreeRTOS. */
listTEST_LIST_INTEGRITY( pxList );
listTEST_LIST_ITEM_INTEGRITY( pxNewListItem );
/* Insert a new list item into pxList, but rather than sort the list,
makes the new list item the last item to be removed by a call to
listGET_OWNER_OF_NEXT_ENTRY(). */
pxNewListItem->pxNext = pxIndex;
pxNewListItem->pxPrevious = pxIndex->pxPrevious;
/* Only used during decision coverage testing. */
mtCOVERAGE_TEST_DELAY();
pxIndex->pxPrevious->pxNext = pxNewListItem;
pxIndex->pxPrevious = pxNewListItem;
/* Remember which list the item is in. */
pxNewListItem->pvContainer = ( void * ) pxList;
( pxList->uxNumberOfItems )++;
}
/*-----------------------------------------------------------*/
void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem )
{
ListItem_t *pxIterator;
const TickType_t xValueOfInsertion = pxNewListItem->xItemValue;
/* Only effective when configASSERT() is also defined, these tests may catch
the list data structures being overwritten in memory. They will not catch
data errors caused by incorrect configuration or use of FreeRTOS. */
listTEST_LIST_INTEGRITY( pxList );
listTEST_LIST_ITEM_INTEGRITY( pxNewListItem );
/* Insert the new list item into the list, sorted in xItemValue order.
If the list already contains a list item with the same item value then the
new list item should be placed after it. This ensures that TCB's which are
stored in ready lists (all of which have the same xItemValue value) get a
share of the CPU. However, if the xItemValue is the same as the back marker
the iteration loop below will not end. Therefore the value is checked
first, and the algorithm slightly modified if necessary. */
if( xValueOfInsertion == portMAX_DELAY )
{
pxIterator = pxList->xListEnd.pxPrevious;
}
else
{
/* *** NOTE ***********************************************************
If you find your application is crashing here then likely causes are
listed below. In addition see http://www.freertos.org/FAQHelp.html for
more tips, and ensure configASSERT() is defined!
http://www.freertos.org/a00110.html#configASSERT
1) Stack overflow -
see http://www.freertos.org/Stacks-and-stack-overflow-checking.html
2) Incorrect interrupt priority assignment, especially on Cortex-M
parts where numerically high priority values denote low actual
interrupt priorities, which can seem counter intuitive. See
http://www.freertos.org/RTOS-Cortex-M3-M4.html and the definition
of configMAX_SYSCALL_INTERRUPT_PRIORITY on
http://www.freertos.org/a00110.html
3) Calling an API function from within a critical section or when
the scheduler is suspended, or calling an API function that does
not end in "FromISR" from an interrupt.
4) Using a queue or semaphore before it has been initialised or
before the scheduler has been started (are interrupts firing
before vTaskStartScheduler() has been called?).
**********************************************************************/
for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */
{
/* There is nothing to do here, just iterating to the wanted
insertion position. */
}
}
pxNewListItem->pxNext = pxIterator->pxNext;
pxNewListItem->pxNext->pxPrevious = pxNewListItem;
pxNewListItem->pxPrevious = pxIterator;
pxIterator->pxNext = pxNewListItem;
/* Remember which list the item is in. This allows fast removal of the
item later. */
pxNewListItem->pvContainer = ( void * ) pxList;
( pxList->uxNumberOfItems )++;
}
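/*
 * Illustrative sketch (example only): demonstrating the sorted insert above
 * and the uxListRemove() return value.
 */
#if 0
void vListUsageExample( void )
{
List_t xList;
ListItem_t xItemA, xItemB;
vListInitialise( &xList );
vListInitialiseItem( &xItemA );
vListInitialiseItem( &xItemB );
/* Items are kept in ascending xItemValue order, so xItemB ends up ahead of
xItemA even though it is inserted second. */
listSET_LIST_ITEM_VALUE( &xItemA, 200 );
listSET_LIST_ITEM_VALUE( &xItemB, 100 );
vListInsert( &xList, &xItemA );
vListInsert( &xList, &xItemB );
/* uxListRemove() returns the number of items remaining in the list. */
configASSERT( uxListRemove( &xItemB ) == 1 );
}
#endif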
/*-----------------------------------------------------------*/
UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove )
{
/* The list item knows which list it is in. Obtain the list from the list
item. */
List_t * const pxList = ( List_t * ) pxItemToRemove->pvContainer;
pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious;
pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext;
/* Only used during decision coverage testing. */
mtCOVERAGE_TEST_DELAY();
/* Make sure the index is left pointing to a valid item. */
if( pxList->pxIndex == pxItemToRemove )
{
pxList->pxIndex = pxItemToRemove->pxPrevious;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxItemToRemove->pvContainer = NULL;
( pxList->uxNumberOfItems )--;
return pxList->uxNumberOfItems;
}
/*-----------------------------------------------------------*/
// queue.c
/*
FreeRTOS V9.0.0 - Copyright (C) 2016 Real Time Engineers Ltd.
All rights reserved
This file is part of the FreeRTOS distribution and carries the standard
FreeRTOS modified GPL (version 2) license header. The full license text is
available at http://www.freertos.org/a00114.html
1 tab == 4 spaces!
*/
#include <stdlib.h>
#include <string.h>
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers. That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"
#if ( configUSE_CO_ROUTINES == 1 )
#include "croutine.h"
#endif
/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 )
/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area. When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any). Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members. An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder pcTail
#define uxQueueType pcHead
#define queueQUEUE_IS_MUTEX NULL
/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME ( ( TickType_t ) 0U )
#if( configUSE_PREEMPTION == 0 )
/* If the cooperative scheduler is being used then a yield should not be
performed just because a higher priority task has been woken. */
#define queueYIELD_IF_USING_PREEMPTION()
#else
#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
* Definition of the queue used by the scheduler.
* Items are queued by copy, not reference. See the following link for the
* rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
*/
typedef struct QueueDefinition
{
int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. One more byte is allocated than necessary to store the queue items; this is used as a marker. */
int8_t *pcWriteTo; /*< Points to the next free place in the storage area. */
union /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
{
int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} u;
List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
UBaseType_t uxItemSize; /*< The size of each item that the queue will hold. */
volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
#endif
#if ( configUSE_QUEUE_SETS == 1 )
struct QueueDefinition *pxQueueSetContainer;
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxQueueNumber;
uint8_t ucQueueType;
#endif
} xQUEUE;
/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
/*-----------------------------------------------------------*/
/*
* The queue registry is just a means for kernel aware debuggers to locate
* queue structures. It has no other purpose so is an optional component.
*/
#if ( configQUEUE_REGISTRY_SIZE > 0 )
/* The type stored within the queue registry array. This allows a name
to be assigned to each queue making kernel aware debugging a little
more user friendly. */
typedef struct QUEUE_REGISTRY_ITEM
{
const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
QueueHandle_t xHandle;
} xQueueRegistryItem;
/* The old xQueueRegistryItem name is maintained above then typedefed to the
new QueueRegistryItem_t name below to enable the use of older kernel aware
debuggers. */
typedef xQueueRegistryItem QueueRegistryItem_t;
/* The queue registry is simply an array of QueueRegistryItem_t structures.
The pcQueueName member of a structure being NULL is indicative of the
array position being vacant. */
PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
#endif /* configQUEUE_REGISTRY_SIZE */
/*
* Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
* prevent an ISR from adding items to or removing items from the queue, but
* does prevent an ISR from removing tasks from the queue event lists. If an
* ISR finds a queue is locked it will instead increment the appropriate queue
* lock count to indicate that a task may require unblocking. When the queue
* is unlocked
* these lock counts are inspected, and the appropriate action taken.
*/
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
/*
* Uses a critical section to determine if there is any data in a queue.
*
* @return pdTRUE if the queue contains no items, otherwise pdFALSE.
*/
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
/*
* Uses a critical section to determine if there is any space in a queue.
*
* @return pdTRUE if there is no space, otherwise pdFALSE.
*/
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
/*
* Copies an item into the queue, either at the front of the queue or the
* back of the queue.
*/
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
/*
* Copies an item out of a queue.
*/
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
#if ( configUSE_QUEUE_SETS == 1 )
/*
* Checks to see if a queue is a member of a queue set, and if so, notifies
* the queue set that the queue contains data.
*/
static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif
/*
* Called after a Queue_t structure has been allocated either statically or
* dynamically to fill in the structure's members.
*/
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
/*
* Mutexes are a special type of queue. When a mutex is created, first the
* queue is created, then prvInitialiseMutex() is called to configure the queue
* as a mutex.
*/
#if( configUSE_MUTEXES == 1 )
static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/
/*
* Macro to mark a queue as locked. Locking a queue prevents an ISR from
* accessing the queue event lists.
*/
#define prvLockQueue( pxQueue ) \
taskENTER_CRITICAL(); \
{ \
if( ( pxQueue )->cRxLock == queueUNLOCKED ) \
{ \
( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
} \
if( ( pxQueue )->cTxLock == queueUNLOCKED ) \
{ \
( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
} \
} \
taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
taskENTER_CRITICAL();
{
pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
pxQueue->pcWriteTo = pxQueue->pcHead;
pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
pxQueue->cRxLock = queueUNLOCKED;
pxQueue->cTxLock = queueUNLOCKED;
if( xNewQueue == pdFALSE )
{
/* If there are tasks blocked waiting to read from the queue, then
the tasks will remain blocked as after this function exits the queue
will still be empty. If there are tasks blocked waiting to write to
the queue, then one should be unblocked as after this function exits
it will be possible to write to it. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Ensure the event queues start in the correct state. */
vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
}
}
taskEXIT_CRITICAL();
/* A value is returned for calling semantic consistency with previous
versions. */
return pdPASS;
}
/*-----------------------------------------------------------*/
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
/* The StaticQueue_t structure and the queue storage area must be
supplied. */
configASSERT( pxStaticQueue != NULL );
/* A queue storage area should be provided if the item size is not 0, and
should not be provided if the item size is 0. */
configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );
#if( configASSERT_DEFINED == 1 )
{
/* Sanity check that the size of the structure used to declare a
variable of type StaticQueue_t or StaticSemaphore_t equals the size of
the real queue and semaphore structures. */
volatile size_t xSize = sizeof( StaticQueue_t );
configASSERT( xSize == sizeof( Queue_t ) );
}
#endif /* configASSERT_DEFINED */
/* The address of a statically allocated queue was passed in, use it.
The address of a statically allocated storage area was also passed in
but is already set. */
pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
if( pxNewQueue != NULL )
{
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
{
/* Queues can be allocated either statically or dynamically, so
note this queue was allocated statically in case the queue is
later deleted. */
pxNewQueue->ucStaticallyAllocated = pdTRUE;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
}
return pxNewQueue;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
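/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: creating a queue from
application-supplied memory through the xQueueCreateStatic() macro in queue.h,
assuming configSUPPORT_STATIC_ALLOCATION is 1. The length and item type are
example values. */
#if 0
#define exampleQUEUE_LENGTH ( 8 )
static StaticQueue_t xExampleQueueBuffer;
static uint8_t ucExampleQueueStorage[ exampleQUEUE_LENGTH * sizeof( uint32_t ) ];

static QueueHandle_t prvCreateStaticQueueExample( void )
{
	/* No pvPortMalloc() call is made - both the queue structure and the
	storage area are supplied by the application, so creation cannot fail
	due to heap exhaustion. */
	return xQueueCreateStatic( exampleQUEUE_LENGTH, sizeof( uint32_t ), ucExampleQueueStorage, &xExampleQueueBuffer );
}
#endif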
/*-----------------------------------------------------------*/
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
uint8_t *pucQueueStorage;
configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
if( uxItemSize == ( UBaseType_t ) 0 )
{
/* There is not going to be a queue storage area. */
xQueueSizeInBytes = ( size_t ) 0;
}
else
{
/* Allocate enough space to hold the maximum number of items that
can be in the queue at any time. */
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
}
pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );
if( pxNewQueue != NULL )
{
/* Jump past the queue structure to find the location of the queue
storage area. */
pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
{
/* Queues can be created either statically or dynamically, so
note this queue was created dynamically in case it is later
deleted. */
pxNewQueue->ucStaticallyAllocated = pdFALSE;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
}
return pxNewQueue;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
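/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: xQueueGenericCreate() is
normally reached through the xQueueCreate() macro in queue.h. The length and
item type below are example values. */
#if 0
static QueueHandle_t prvCreateQueueExample( void )
{
	/* Creates a queue able to hold 8 uint32_t values. NULL is returned if
	there is insufficient FreeRTOS heap available. */
	return xQueueCreate( 8, sizeof( uint32_t ) );
}
#endif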
/*-----------------------------------------------------------*/
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
{
/* Remove compiler warnings about unused parameters should
configUSE_TRACE_FACILITY not be set to 1. */
( void ) ucQueueType;
if( uxItemSize == ( UBaseType_t ) 0 )
{
/* No RAM was allocated for the queue storage area, but pcHead cannot
be set to NULL because NULL is used as a key to say the queue is used as
a mutex. Therefore just set pcHead to point to the queue as a benign
value that is known to be within the memory map. */
pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
}
else
{
/* Set the head to the start of the queue storage area. */
pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
}
/* Initialise the queue members as described where the queue type is
defined. */
pxNewQueue->uxLength = uxQueueLength;
pxNewQueue->uxItemSize = uxItemSize;
( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
#if ( configUSE_TRACE_FACILITY == 1 )
{
pxNewQueue->ucQueueType = ucQueueType;
}
#endif /* configUSE_TRACE_FACILITY */
#if( configUSE_QUEUE_SETS == 1 )
{
pxNewQueue->pxQueueSetContainer = NULL;
}
#endif /* configUSE_QUEUE_SETS */
traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/
#if( configUSE_MUTEXES == 1 )
static void prvInitialiseMutex( Queue_t *pxNewQueue )
{
if( pxNewQueue != NULL )
{
/* The queue create function will set all the queue structure members
correctly for a generic queue, but this function is creating a
mutex. Overwrite those members that need to be set differently -
in particular the information required for priority inheritance. */
pxNewQueue->pxMutexHolder = NULL;
pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;
/* In case this is a recursive mutex. */
pxNewQueue->u.uxRecursiveCallCount = 0;
traceCREATE_MUTEX( pxNewQueue );
/* Start with the semaphore in the expected state. */
( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
}
else
{
traceCREATE_MUTEX_FAILED();
}
}
#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
prvInitialiseMutex( pxNewQueue );
return pxNewQueue;
}
#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
{
Queue_t *pxNewQueue;
const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
/* Prevent compiler warnings about unused parameters if
configUSE_TRACE_FACILITY does not equal 1. */
( void ) ucQueueType;
pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
prvInitialiseMutex( pxNewQueue );
return pxNewQueue;
}
#endif /* configUSE_MUTEXES */
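/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: mutexes are normally
created and used through the semphr.h macros that wrap the functions above
(semphr.h must be included). The 100ms block time is an example value. */
#if 0
static void prvMutexUsageExample( void )
{
SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

	if( xMutex != NULL )
	{
		if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
		{
			/* The mutex is held here - priority inheritance is applied
			to the holder if a higher priority task blocks on it. */
			xSemaphoreGive( xMutex );
		}
	}
}
#endif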
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
{
void *pxReturn;
/* This function is called by xSemaphoreGetMutexHolder(), and should not
be called directly. Note: This is a good way of determining if the
calling task is the mutex holder, but not a good way of determining the
identity of the mutex holder, as the holder may change between the
following critical section exiting and the function returning. */
taskENTER_CRITICAL();
{
if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
{
pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
}
else
{
pxReturn = NULL;
}
}
taskEXIT_CRITICAL();
return pxReturn;
} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
#endif
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )
BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
{
BaseType_t xReturn;
Queue_t * const pxMutex = ( Queue_t * ) xMutex;
configASSERT( pxMutex );
/* If this is the task that holds the mutex then pxMutexHolder will not
change outside of this task. If this task does not hold the mutex then
pxMutexHolder can never coincidentally equal the task's handle, and as
this is the only condition we are interested in it does not matter if
pxMutexHolder is accessed simultaneously by another task. Therefore no
mutual exclusion is required to test the pxMutexHolder variable. */
if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
{
traceGIVE_MUTEX_RECURSIVE( pxMutex );
/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
the task handle, therefore no underflow check is required. Also,
uxRecursiveCallCount is only modified by the mutex holder, and as
there can only be one, no mutual exclusion is required to modify the
uxRecursiveCallCount member. */
( pxMutex->u.uxRecursiveCallCount )--;
/* Has the recursive call count unwound to 0? */
if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
{
/* Return the mutex. This will automatically unblock any other
task that might be waiting to access the mutex. */
( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xReturn = pdPASS;
}
else
{
/* The mutex cannot be given because the calling task is not the
holder. */
xReturn = pdFAIL;
traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
}
return xReturn;
}
#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )
BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
{
BaseType_t xReturn;
Queue_t * const pxMutex = ( Queue_t * ) xMutex;
configASSERT( pxMutex );
/* Comments regarding mutual exclusion as per those within
xQueueGiveMutexRecursive(). */
traceTAKE_MUTEX_RECURSIVE( pxMutex );
if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
{
( pxMutex->u.uxRecursiveCallCount )++;
xReturn = pdPASS;
}
else
{
xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );
/* pdPASS will only be returned if the mutex was successfully
obtained. The calling task may have entered the Blocked state
before reaching here. */
if( xReturn != pdFAIL )
{
( pxMutex->u.uxRecursiveCallCount )++;
}
else
{
traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
}
}
return xReturn;
}
#endif /* configUSE_RECURSIVE_MUTEXES */
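/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: a recursive mutex must
be given once for every time it was taken, matching the call count logic
above. The semphr.h macros are the usual entry points; the block times are
example values. */
#if 0
static void prvRecursiveMutexExample( void )
{
SemaphoreHandle_t xRecursiveMutex = xSemaphoreCreateRecursiveMutex();

	if( xRecursiveMutex != NULL )
	{
		/* The holder can take the mutex again without blocking... */
		( void ) xSemaphoreTakeRecursive( xRecursiveMutex, pdMS_TO_TICKS( 10 ) );
		( void ) xSemaphoreTakeRecursive( xRecursiveMutex, pdMS_TO_TICKS( 10 ) );

		/* ...but it only becomes available to other tasks once it has
		been given the same number of times. */
		( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
		( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
	}
}
#endif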
/*-----------------------------------------------------------*/
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
{
QueueHandle_t xHandle;
configASSERT( uxMaxCount != 0 );
configASSERT( uxInitialCount <= uxMaxCount );
xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
if( xHandle != NULL )
{
( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
traceCREATE_COUNTING_SEMAPHORE();
}
else
{
traceCREATE_COUNTING_SEMAPHORE_FAILED();
}
return xHandle;
}
#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
{
QueueHandle_t xHandle;
configASSERT( uxMaxCount != 0 );
configASSERT( uxInitialCount <= uxMaxCount );
xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
if( xHandle != NULL )
{
( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
traceCREATE_COUNTING_SEMAPHORE();
}
else
{
traceCREATE_COUNTING_SEMAPHORE_FAILED();
}
return xHandle;
}
#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
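/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: counting semaphores are
normally created through the xSemaphoreCreateCounting() macro in semphr.h,
which calls xQueueCreateCountingSemaphore() above. The counts are example
values. */
#if 0
static SemaphoreHandle_t prvCreateCountingSemaphoreExample( void )
{
	/* A semaphore that can count up to 5, with none initially available -
	uxMessagesWaiting is seeded with the initial count, as above. */
	return xSemaphoreCreateCounting( 5, 0 );
}
#endif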
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
/* This function relaxes the coding standard somewhat to allow return
statements within the function itself. This is done in the interest
of execution time efficiency. */
for( ;; )
{
taskENTER_CRITICAL();
{
/* Is there room on the queue now? The running task must be the
highest priority task wanting to access the queue. If the head item
in the queue is to be overwritten then it does not matter if the
queue is full. */
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
traceQUEUE_SEND( pxQueue );
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
#if ( configUSE_QUEUE_SETS == 1 )
{
if( pxQueue->pxQueueSetContainer != NULL )
{
if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
{
/* The queue is a member of a queue set, and posting
to the queue set caused a higher priority task to
unblock. A context switch is required. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* If there was a task waiting for data to arrive on the
queue then unblock it now. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The unblocked task has a priority higher than
our own so yield immediately. Yes it is ok to
do this from within the critical section - the
kernel takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xYieldRequired != pdFALSE )
{
/* This path is a special case that will only get
executed if the task was holding multiple mutexes
and the mutexes were given back in an order that is
different to that in which they were taken. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#else /* configUSE_QUEUE_SETS */
{
/* If there was a task waiting for data to arrive on the
queue then unblock it now. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The unblocked task has a priority higher than
our own so yield immediately. Yes it is ok to do
this from within the critical section - the kernel
takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xYieldRequired != pdFALSE )
{
/* This path is a special case that will only get
executed if the task was holding multiple mutexes and
the mutexes were given back in an order that is
different to that in which they were taken. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
taskEXIT_CRITICAL();
return pdPASS;
}
else
{
if( xTicksToWait == ( TickType_t ) 0 )
{
/* The queue was full and no block time is specified (or
the block time has expired) so leave now. */
taskEXIT_CRITICAL();
/* Return to the original privilege level before exiting
the function. */
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
else if( xEntryTimeSet == pdFALSE )
{
/* The queue was full and a block time was specified so
configure the timeout structure. */
vTaskSetTimeOutState( &xTimeOut );
xEntryTimeSet = pdTRUE;
}
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
}
taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
now the critical section has been exited. */
vTaskSuspendAll();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
if( prvIsQueueFull( pxQueue ) != pdFALSE )
{
traceBLOCKING_ON_QUEUE_SEND( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
/* Unlocking the queue means queue events can affect the
event list. It is possible that interrupts occurring now
remove this task from the event list again - but as the
scheduler is suspended the task will go onto the pending
ready list instead of the actual ready list. */
prvUnlockQueue( pxQueue );
/* Resuming the scheduler will move tasks from the pending
ready list into the ready list - so it is feasible that this
task is already in a ready list before it yields - in which
case the yield will not cause a context switch unless there
is also a higher priority task in the pending ready list. */
if( xTaskResumeAll() == pdFALSE )
{
portYIELD_WITHIN_API();
}
}
else
{
/* Try again. */
prvUnlockQueue( pxQueue );
( void ) xTaskResumeAll();
}
}
else
{
/* The timeout has expired. */
prvUnlockQueue( pxQueue );
( void ) xTaskResumeAll();
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
}
}
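/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: xQueueGenericSend() is
normally reached through the xQueueSend()/xQueueSendToBack() macros in
queue.h. The queue handle, value and 100ms block time are example values. */
#if 0
static void prvQueueSendExample( QueueHandle_t xExampleQueue )
{
uint32_t ulValueToSend = 42;

	/* Blocks for up to 100ms waiting for space; errQUEUE_FULL is returned
	if the queue is still full when the timeout expires. */
	if( xQueueSend( xExampleQueue, &ulValueToSend, pdMS_TO_TICKS( 100 ) ) != pdPASS )
	{
		/* The value could not be posted - handle the failure here. */
	}
}
#endif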
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
/* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are
above the maximum system call priority are kept permanently enabled, even
when the RTOS kernel is in a critical section, but cannot make any calls to
FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
failure if a FreeRTOS API function is called from an interrupt that has been
assigned a priority above the configured maximum system call priority.
Only FreeRTOS functions that end in FromISR can be called from interrupts
that have been assigned a priority at or (logically) below the maximum
system call interrupt priority. FreeRTOS maintains a separate interrupt
safe API to ensure interrupt entry is as fast and as simple as possible.
More information (albeit Cortex-M specific) is provided on the following
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
/* Similar to xQueueGenericSend, except without blocking if there is no room
in the queue. Also don't directly wake a task that was blocked on a queue
read, instead return a flag to say whether a context switch is required or
not (i.e. has a task with a higher priority than us been woken by this
post). */
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
const int8_t cTxLock = pxQueue->cTxLock;
traceQUEUE_SEND_FROM_ISR( pxQueue );
/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
semaphore or mutex. That means prvCopyDataToQueue() cannot result
in a task disinheriting a priority and prvCopyDataToQueue() can be
called here even though the disinherit function does not check if
the scheduler is suspended before accessing the ready lists. */
( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
/* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */
if( cTxLock == queueUNLOCKED )
{
#if ( configUSE_QUEUE_SETS == 1 )
{
if( pxQueue->pxQueueSetContainer != NULL )
{
if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
{
/* The queue is a member of a queue set, and posting
to the queue set caused a higher priority task to
unblock. A context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so
record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#else /* configUSE_QUEUE_SETS */
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
}
else
{
/* Increment the lock count so the task that unlocks the queue
knows that data was posted while it was locked. */
pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
}
xReturn = pdPASS;
}
else
{
traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}
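/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: the usual entry point is
the xQueueSendFromISR() macro in queue.h. The handler name and queue handle
are examples, and portYIELD_FROM_ISR() is assumed to be provided by the port,
as it is on most ports. */
#if 0
extern QueueHandle_t xExampleQueue;

void vExampleInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
uint32_t ulValue = 0;

	( void ) xQueueSendFromISR( xExampleQueue, &ulValue, &xHigherPriorityTaskWoken );

	/* Request a context switch on interrupt exit if posting unblocked a
	task of higher priority than the one that was interrupted. */
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif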
/*-----------------------------------------------------------*/
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
/* Similar to xQueueGenericSendFromISR() but used with semaphores where the
item size is 0. Don't directly wake a task that was blocked on a queue
read, instead return a flag to say whether a context switch is required or
not (i.e. has a task with a higher priority than us been woken by this
post). */
configASSERT( pxQueue );
/* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
if the item size is not 0. */
configASSERT( pxQueue->uxItemSize == 0 );
/* Normally a mutex would not be given from an interrupt, especially if
there is a mutex holder, as priority inheritance makes no sense for an
interrupt, only for tasks. */
configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );
/* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are
above the maximum system call priority are kept permanently enabled, even
when the RTOS kernel is in a critical section, but cannot make any calls to
FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
failure if a FreeRTOS API function is called from an interrupt that has been
assigned a priority above the configured maximum system call priority.
Only FreeRTOS functions that end in FromISR can be called from interrupts
that have been assigned a priority at or (logically) below the maximum
system call interrupt priority. FreeRTOS maintains a separate interrupt
safe API to ensure interrupt entry is as fast and as simple as possible.
More information (albeit Cortex-M specific) is provided on the following
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* When the queue is used to implement a semaphore no data is ever
moved through the queue but it is still valid to see if the queue 'has
space'. */
if( uxMessagesWaiting < pxQueue->uxLength )
{
const int8_t cTxLock = pxQueue->cTxLock;
traceQUEUE_SEND_FROM_ISR( pxQueue );
/* A task can only have an inherited priority if it is a mutex
holder - and if there is a mutex holder then the mutex cannot be
given from an ISR. As this is the ISR version of the function it
can be assumed there is no mutex holder and no need to determine if
priority disinheritance is needed. Simply increase the count of
messages (semaphores) available. */
pxQueue->uxMessagesWaiting = uxMessagesWaiting + 1;
/* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */
if( cTxLock == queueUNLOCKED )
{
#if ( configUSE_QUEUE_SETS == 1 )
{
if( pxQueue->pxQueueSetContainer != NULL )
{
if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
{
/* The semaphore is a member of a queue set, and
posting to the queue set caused a higher priority
task to unblock. A context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so
record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#else /* configUSE_QUEUE_SETS */
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
}
else
{
/* Increment the lock count so the task that unlocks the queue
knows that data was posted while it was locked. */
pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
}
xReturn = pdPASS;
}
else
{
traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}
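/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: xQueueGiveFromISR() is
normally reached through the xSemaphoreGiveFromISR() macro in semphr.h. The
handler name and semaphore handle are example values. */
#if 0
extern SemaphoreHandle_t xExampleSemaphore;

void vExampleSemaphoreISR( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	( void ) xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif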
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
/* This function relaxes the coding standard somewhat to allow return
statements within the function itself. This is done in the interest
of execution time efficiency. */
for( ;; )
{
taskENTER_CRITICAL();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Is there data in the queue now? To be running the calling task
must be the highest priority task wanting to access the queue. */
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* Remember the read position in case the queue is only being
peeked. */
pcOriginalReadPosition = pxQueue->u.pcReadFrom;
prvCopyDataFromQueue( pxQueue, pvBuffer );
if( xJustPeeking == pdFALSE )
{
traceQUEUE_RECEIVE( pxQueue );
/* Actually removing data, not just peeking. */
pxQueue->uxMessagesWaiting = uxMessagesWaiting - 1;
#if ( configUSE_MUTEXES == 1 )
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
/* Record the information required to implement
priority inheritance should it become necessary. */
pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_MUTEXES */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
traceQUEUE_PEEK( pxQueue );
/* The data is not being removed, so reset the read
pointer. */
pxQueue->u.pcReadFrom = pcOriginalReadPosition;
/* The data is being left in the queue, so see if there are
any other tasks waiting for the data. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority than this task. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
return pdPASS;
}
else
{
if( xTicksToWait == ( TickType_t ) 0 )
{
/* The queue was empty and no block time is specified (or
the block time has expired) so leave now. */
taskEXIT_CRITICAL();
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
else if( xEntryTimeSet == pdFALSE )
{
/* The queue was empty and a block time was specified so
configure the timeout structure. */
vTaskSetTimeOutState( &xTimeOut );
xEntryTimeSet = pdTRUE;
}
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
}
taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
now the critical section has been exited. */
vTaskSuspendAll();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
#if ( configUSE_MUTEXES == 1 )
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
taskENTER_CRITICAL();
{
vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
}
taskEXIT_CRITICAL();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
if( xTaskResumeAll() == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Try again. */
prvUnlockQueue( pxQueue );
( void ) xTaskResumeAll();
}
}
else
{
prvUnlockQueue( pxQueue );
( void ) xTaskResumeAll();
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
}
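/*-----------------------------------------------------------*/
/* Illustrative sketch only, compiled out with #if 0: xQueueGenericReceive()
backs both the xQueueReceive() and xQueuePeek() macros in queue.h - the
xJustPeeking parameter selects between them. The handle and block times are
example values. */
#if 0
static void prvQueueReceiveExample( QueueHandle_t xExampleQueue )
{
uint32_t ulReceivedValue;

	/* Peek without removing the item and without blocking. */
	if( xQueuePeek( xExampleQueue, &ulReceivedValue, 0 ) == pdPASS )
	{
		/* The item remains at the head of the queue, so this receive
		returns the same item and removes it, blocking up to 100ms. */
		( void ) xQueueReceive( xExampleQueue, &ulReceivedValue, pdMS_TO_TICKS( 100 ) );
	}
}
#endif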
/*-----------------------------------------------------------*/
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are
above the maximum system call priority are kept permanently enabled, even
when the RTOS kernel is in a critical section, but cannot make any calls to
FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
failure if a FreeRTOS API function is called from an interrupt that has been
assigned a priority above the configured maximum system call priority.
Only FreeRTOS functions that end in FromISR can be called from interrupts
that have been assigned a priority at or (logically) below the maximum
system call interrupt priority. FreeRTOS maintains a separate interrupt
safe API to ensure interrupt entry is as fast and as simple as possible.
More information (albeit Cortex-M specific) is provided on the following
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Cannot block in an ISR, so check there is data available. */
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
const int8_t cRxLock = pxQueue->cRxLock;
traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
prvCopyDataFromQueue( pxQueue, pvBuffer );
pxQueue->uxMessagesWaiting = uxMessagesWaiting - 1;
/* If the queue is locked the event list will not be modified.
Instead update the lock count so the task that unlocks the queue
will know that an ISR has removed data while the queue was
locked. */
if( cRxLock == queueUNLOCKED )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
/* The task waiting has a higher priority than us so
force a context switch. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Increment the lock count so the task that unlocks the queue
knows that data was removed while it was locked. */
pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
}
xReturn = pdPASS;
}
else
{
xReturn = pdFAIL;
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
/* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are
above the maximum system call priority are kept permanently enabled, even
when the RTOS kernel is in a critical section, but cannot make any calls to
FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
failure if a FreeRTOS API function is called from an interrupt that has been
assigned a priority above the configured maximum system call priority.
Only FreeRTOS functions that end in FromISR can be called from interrupts
that have been assigned a priority at or (logically) below the maximum
system call interrupt priority. FreeRTOS maintains a separate interrupt
safe API to ensure interrupt entry is as fast and as simple as possible.
More information (albeit Cortex-M specific) is provided on the following
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
{
traceQUEUE_PEEK_FROM_ISR( pxQueue );
/* Remember the read position so it can be reset as nothing is
actually being removed from the queue. */
pcOriginalReadPosition = pxQueue->u.pcReadFrom;
prvCopyDataFromQueue( pxQueue, pvBuffer );
pxQueue->u.pcReadFrom = pcOriginalReadPosition;
xReturn = pdPASS;
}
else
{
xReturn = pdFAIL;
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}
/*-----------------------------------------------------------*/
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
configASSERT( xQueue );
taskENTER_CRITICAL();
{
uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
}
taskEXIT_CRITICAL();
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/
UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;
pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
taskENTER_CRITICAL();
{
uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
}
taskEXIT_CRITICAL();
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
configASSERT( xQueue );
uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/
void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
traceQUEUE_DELETE( pxQueue );
#if ( configQUEUE_REGISTRY_SIZE > 0 )
{
vQueueUnregisterQueue( pxQueue );
}
#endif
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
/* The queue can only have been allocated dynamically - free it
again. */
vPortFree( pxQueue );
}
#elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
{
/* The queue could have been allocated statically or dynamically, so
check before attempting to free the memory. */
if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{
vPortFree( pxQueue );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#else
{
/* The queue must have been statically allocated, so is not going to be
deleted. Avoid compiler warnings about the unused parameter. */
( void ) pxQueue;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
{
return ( ( Queue_t * ) xQueue )->uxQueueNumber;
}
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )
void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
{
( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
}
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )
uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
{
return ( ( Queue_t * ) xQueue )->ucQueueType;
}
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;
UBaseType_t uxMessagesWaiting;
/* This function is called from a critical section. */
uxMessagesWaiting = pxQueue->uxMessagesWaiting;
if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
{
#if ( configUSE_MUTEXES == 1 )
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
/* The mutex is no longer being held. */
xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
pxQueue->pxMutexHolder = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_MUTEXES */
}
else if( xPosition == queueSEND_TO_BACK )
{
( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
pxQueue->pcWriteTo += pxQueue->uxItemSize;
if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
{
pxQueue->pcWriteTo = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
{
pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xPosition == queueOVERWRITE )
{
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* An item is not being added but overwritten, so subtract
one from the recorded number of items in the queue so when
one is added again below the number of recorded items remains
correct. */
--uxMessagesWaiting;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
pxQueue->uxMessagesWaiting = uxMessagesWaiting + 1;
return xReturn;
}
/*-----------------------------------------------------------*/
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
{
pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
{
pxQueue->u.pcReadFrom = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
}
}
/*-----------------------------------------------------------*/
static void prvUnlockQueue( Queue_t * const pxQueue )
{
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
/* The lock counts contain the number of extra data items placed on or
removed from the queue while the queue was locked. When a queue is
locked items can be added or removed, but the event lists cannot be
updated. */
taskENTER_CRITICAL();
{
int8_t cTxLock = pxQueue->cTxLock;
/* See if data was added to the queue while it was locked. */
while( cTxLock > queueLOCKED_UNMODIFIED )
{
/* Data was posted while the queue was locked. Are any tasks
blocked waiting for data to become available? */
#if ( configUSE_QUEUE_SETS == 1 )
{
if( pxQueue->pxQueueSetContainer != NULL )
{
if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
{
/* The queue is a member of a queue set, and posting to
the queue set caused a higher priority task to unblock.
A context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Tasks that are removed from the event list will get
added to the pending ready list as the scheduler is still
suspended. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
break;
}
}
}
#else /* configUSE_QUEUE_SETS */
{
/* Tasks that are removed from the event list will get added to
the pending ready list as the scheduler is still suspended. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that
a context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
break;
}
}
#endif /* configUSE_QUEUE_SETS */
--cTxLock;
}
pxQueue->cTxLock = queueUNLOCKED;
}
taskEXIT_CRITICAL();
/* Do the same for the Rx lock. */
taskENTER_CRITICAL();
{
int8_t cRxLock = pxQueue->cRxLock;
while( cRxLock > queueLOCKED_UNMODIFIED )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--cRxLock;
}
else
{
break;
}
}
pxQueue->cRxLock = queueUNLOCKED;
}
taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/
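/* Worked scenario for the locking mechanism above (illustrative only):
prvLockQueue() sets cTxLock and cRxLock to queueLOCKED_UNMODIFIED ( 0 ).
If an ISR then posts two items while the queue is locked, each post simply
increments cTxLock to 2 without touching the event lists.  When
prvUnlockQueue() runs it loops twice, each pass removing at most one task
from xTasksWaitingToReceive and recording any required context switch via
vTaskMissedYield(). */
/*-----------------------------------------------------------*/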
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;
taskENTER_CRITICAL();
{
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
}
taskEXIT_CRITICAL();
return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;
configASSERT( xQueue );
if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;
taskENTER_CRITICAL();
{
if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
}
taskEXIT_CRITICAL();
return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;
configASSERT( xQueue );
if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )
BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
{
BaseType_t xReturn;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
/* If the queue is already full we may have to block. A critical section
is required to prevent an interrupt removing something from the queue
between the check to see if the queue is full and blocking on the queue. */
portDISABLE_INTERRUPTS();
{
if( prvIsQueueFull( pxQueue ) != pdFALSE )
{
/* The queue is full - do we want to block or just leave without
posting? */
if( xTicksToWait > ( TickType_t ) 0 )
{
/* As this is called from a co-routine we cannot block directly, but
instead return a value indicating that a block is required. */
vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
portENABLE_INTERRUPTS();
return errQUEUE_BLOCKED;
}
else
{
portENABLE_INTERRUPTS();
return errQUEUE_FULL;
}
}
}
portENABLE_INTERRUPTS();
portDISABLE_INTERRUPTS();
{
if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
{
/* There is room in the queue, copy the data into the queue. */
prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
xReturn = pdPASS;
/* Were any co-routines waiting for data to become available? */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
/* In this instance the co-routine could be placed directly
into the ready list as we are within a critical section.
Instead the same pending ready list mechanism is used as if
the event were caused from within an interrupt. */
if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The co-routine waiting has a higher priority so record
that a yield might be appropriate. */
xReturn = errQUEUE_YIELD;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
xReturn = errQUEUE_FULL;
}
}
portENABLE_INTERRUPTS();
return xReturn;
}
#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
#if ( configUSE_CO_ROUTINES == 1 )
BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xReturn;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
/* If the queue is already empty we may have to block. A critical section
is required to prevent an interrupt adding something to the queue
between the check to see if the queue is empty and blocking on the queue. */
portDISABLE_INTERRUPTS();
{
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
/* There are no messages in the queue, do we want to block or just
leave with nothing? */
if( xTicksToWait > ( TickType_t ) 0 )
{
/* As this is a co-routine we cannot block directly, but return
indicating that we need to block. */
vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
portENABLE_INTERRUPTS();
return errQUEUE_BLOCKED;
}
else
{
portENABLE_INTERRUPTS();
return errQUEUE_FULL;
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
portENABLE_INTERRUPTS();
portDISABLE_INTERRUPTS();
{
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* Data is available from the queue. */
pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
{
pxQueue->u.pcReadFrom = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--( pxQueue->uxMessagesWaiting );
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
xReturn = pdPASS;
/* Were any co-routines waiting for space to become available? */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
/* In this instance the co-routine could be placed directly
into the ready list as we are within a critical section.
Instead the same pending ready list mechanism is used as if
the event were caused from within an interrupt. */
if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
xReturn = errQUEUE_YIELD;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
xReturn = pdFAIL;
}
}
portENABLE_INTERRUPTS();
return xReturn;
}
#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
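/* Applications do not call xQueueCRSend()/xQueueCRReceive() directly -
croutine.h provides the crQUEUE_SEND() and crQUEUE_RECEIVE() wrappers that
handle the errQUEUE_BLOCKED return by yielding the co-routine.  Below is a
minimal usage sketch (excluded from the build); xCoRoutineQueue is a
hypothetical queue created elsewhere with xQueueCreate(). */
#if 0
static void prvProducerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
/* Co-routines share a stack, so variables that must survive a blocking
call have to be static. */
static UBaseType_t uxValueToPost = 0;
static BaseType_t xResult;

	( void ) uxIndex;

	/* Co-routines must begin with a call to crSTART(). */
	crSTART( xHandle );

	for( ;; )
	{
		/* Block for up to 10 ticks waiting for space in the queue. */
		crQUEUE_SEND( xHandle, xCoRoutineQueue, &uxValueToPost, ( TickType_t ) 10, &xResult );

		if( xResult == pdPASS )
		{
			uxValueToPost++;
		}
	}

	/* Co-routines must end with a call to crEND(). */
	crEND();
}
#endif /* 0 */
/*-----------------------------------------------------------*/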
#if ( configUSE_CO_ROUTINES == 1 )
BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
/* Cannot block within an ISR so if there is no space on the queue then
exit without doing anything. */
if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
{
prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
/* We only want to wake one co-routine per ISR, so check that a
co-routine has not already been woken. */
if( xCoRoutinePreviouslyWoken == pdFALSE )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
return pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xCoRoutinePreviouslyWoken;
}
#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
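/* A minimal ISR-side sketch (excluded from the build).  xCommsQueue is a
hypothetical queue of char items and prvReadRxedChar() is a hypothetical
function that reads the received character from a peripheral.  Feeding the
return value back in ensures only one co-routine is woken per interrupt,
as described above. */
#if 0
void vRxISR( void )
{
BaseType_t xCRWokenByPost = pdFALSE;
char cRxedChar;

	cRxedChar = prvReadRxedChar();

	/* crQUEUE_SEND_FROM_ISR() is the croutine.h wrapper for
	xQueueCRSendFromISR(). */
	xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsQueue, &cRxedChar, xCRWokenByPost );
	( void ) xCRWokenByPost;
}
#endif /* 0 */
/*-----------------------------------------------------------*/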
#if ( configUSE_CO_ROUTINES == 1 )
BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
{
BaseType_t xReturn;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
/* We cannot block from an ISR, so check there is data available. If
not then just leave without doing anything. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* Copy the data from the queue. */
pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
{
pxQueue->u.pcReadFrom = pxQueue->pcHead;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--( pxQueue->uxMessagesWaiting );
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
if( ( *pxCoRoutineWoken ) == pdFALSE )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
*pxCoRoutineWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xReturn = pdPASS;
}
else
{
xReturn = pdFAIL;
}
return xReturn;
}
#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
#if ( configQUEUE_REGISTRY_SIZE > 0 )
void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
{
UBaseType_t ux;
/* See if there is an empty space in the registry. A NULL name denotes
a free slot. */
for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
{
if( xQueueRegistry[ ux ].pcQueueName == NULL )
{
/* Store the information on this queue. */
xQueueRegistry[ ux ].pcQueueName = pcQueueName;
xQueueRegistry[ ux ].xHandle = xQueue;
traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
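/* A minimal registry sketch (excluded from the build).  Adding a queue to
the registry gives it a human readable name in a kernel aware debugger.
Note the name string is not copied - only the pointer is stored - so a
string literal or other persistent string must be used. */
#if 0
void vCreateAndRegisterQueue( void )
{
QueueHandle_t xQueue;

	xQueue = xQueueCreate( 10, sizeof( uint32_t ) );

	if( xQueue != NULL )
	{
		vQueueAddToRegistry( xQueue, "CommandQueue" );
	}
}
#endif /* 0 */
/*-----------------------------------------------------------*/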
#if ( configQUEUE_REGISTRY_SIZE > 0 )
const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
{
UBaseType_t ux;
const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
/* Note there is nothing here to protect against another task adding or
removing entries from the registry while it is being searched. */
for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
{
if( xQueueRegistry[ ux ].xHandle == xQueue )
{
pcReturn = xQueueRegistry[ ux ].pcQueueName;
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
return pcReturn;
}
#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
#if ( configQUEUE_REGISTRY_SIZE > 0 )
void vQueueUnregisterQueue( QueueHandle_t xQueue )
{
UBaseType_t ux;
/* See if the handle of the queue being unregistered is actually in the
registry. */
for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
{
if( xQueueRegistry[ ux ].xHandle == xQueue )
{
/* Set the name to NULL to show that this slot is free again. */
xQueueRegistry[ ux ].pcQueueName = NULL;
/* Set the handle to NULL to ensure the same queue handle cannot
appear in the registry twice if it is added, removed, then
added again. */
xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
break;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
#if ( configUSE_TIMERS == 1 )
void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
/* This function should not be called by application code hence the
'Restricted' in its name. It is not part of the public API. It is
designed for use by kernel code, and has special calling requirements.
It can result in vListInsert() being called on a list that can only
possibly ever have one item in it, so the list will be fast, but even
so it should be called with the scheduler locked and not from a critical
section. */
/* Only do anything if there are no messages in the queue. This function
will not actually cause the task to block, just place it on a blocked
list. It will not block until the scheduler is unlocked - at which
time a yield will be performed. If an item is added to the queue while
the queue is locked, and the calling task blocks on the queue, then the
calling task will be immediately unblocked when the queue is unlocked. */
prvLockQueue( pxQueue );
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
{
/* There is nothing in the queue, block for the specified period. */
vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
prvUnlockQueue( pxQueue );
}
#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
{
QueueSetHandle_t pxQueue;
pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
return pxQueue;
}
#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )
BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
{
BaseType_t xReturn;
taskENTER_CRITICAL();
{
if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
{
/* Cannot add a queue/semaphore to more than one queue set. */
xReturn = pdFAIL;
}
else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
{
/* Cannot add a queue/semaphore to a queue set if there are already
items in the queue/semaphore. */
xReturn = pdFAIL;
}
else
{
( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
xReturn = pdPASS;
}
}
taskEXIT_CRITICAL();
return xReturn;
}
#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )
BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
{
BaseType_t xReturn;
Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
{
/* The queue was not a member of the set. */
xReturn = pdFAIL;
}
else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
{
/* It is dangerous to remove a queue from a set when the queue is
not empty because the queue set will still hold pending events for
the queue. */
xReturn = pdFAIL;
}
else
{
taskENTER_CRITICAL();
{
/* The queue is no longer contained in the set. */
pxQueueOrSemaphore->pxQueueSetContainer = NULL;
}
taskEXIT_CRITICAL();
xReturn = pdPASS;
}
return xReturn;
} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )
QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
{
QueueSetMemberHandle_t xReturn = NULL;
( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
return xReturn;
}
#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )
QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
{
QueueSetMemberHandle_t xReturn = NULL;
( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
return xReturn;
}
#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
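/* A minimal queue set sketch (excluded from the build).  The set must be
dimensioned to hold at least as many events as the combined lengths of its
members, and members must be empty when added.  xQueue1 and xQueue2 are
hypothetical queues of uint32_t items, each created with a length of 10. */
#if 0
void vQueueSetReaderTask( void *pvParameters )
{
QueueSetHandle_t xQueueSet;
QueueSetMemberHandle_t xActivatedMember;
uint32_t ulReceived;

	( void ) pvParameters;

	/* Two member queues of length 10 each, so size the set for 20 events. */
	xQueueSet = xQueueCreateSet( 20 );
	( void ) xQueueAddToSet( xQueue1, xQueueSet );
	( void ) xQueueAddToSet( xQueue2, xQueueSet );

	for( ;; )
	{
		/* Block until one of the member queues contains data, then read
		from whichever member was returned.  The read cannot block because
		the select already guarantees data is available. */
		xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );

		if( xActivatedMember != NULL )
		{
			( void ) xQueueReceive( ( QueueHandle_t ) xActivatedMember, &ulReceived, 0 );
		}
	}
}
#endif /* 0 */
/*-----------------------------------------------------------*/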
#if ( configUSE_QUEUE_SETS == 1 )
static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
{
Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
BaseType_t xReturn = pdFALSE;
/* This function must be called from within a critical section. */
configASSERT( pxQueueSetContainer );
configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
{
const int8_t cTxLock = pxQueueSetContainer->cTxLock;
traceQUEUE_SEND( pxQueueSetContainer );
/* The data copied is the handle of the queue that contains data. */
xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
if( cTxLock == queueUNLOCKED )
{
if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority. */
xReturn = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xReturn;
}
#endif /* configUSE_QUEUE_SETS */
// stream_buffer.c
/*
* FreeRTOS Kernel V10.0.1
* Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/* Standard includes. */
#include <stdint.h>
#include <string.h>
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers. That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "stream_buffer.h"
#if( configUSE_TASK_NOTIFICATIONS != 1 )
#error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c
#endif
/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* If the user has not provided application specific Rx notification macros,
or #defined the notification macros away, then provide default implementations
that use task notifications. */
/*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */
#ifndef sbRECEIVE_COMPLETED
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \
( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \
( uint32_t ) 0, \
eNoAction ); \
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \
} \
( void ) xTaskResumeAll();
#endif /* sbRECEIVE_COMPLETED */
#ifndef sbRECEIVE_COMPLETED_FROM_ISR
#define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \
pxHigherPriorityTaskWoken ) \
{ \
UBaseType_t uxSavedInterruptStatus; \
\
uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \
( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, \
( uint32_t ) 0, \
eNoAction, \
pxHigherPriorityTaskWoken ); \
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \
} \
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
}
#endif /* sbRECEIVE_COMPLETED_FROM_ISR */
/* If the user has not provided an application specific Tx notification macro,
or #defined the notification macro away, then provide a default implementation
that uses task notifications. */
#ifndef sbSEND_COMPLETED
#define sbSEND_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \
( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \
( uint32_t ) 0, \
eNoAction ); \
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \
} \
( void ) xTaskResumeAll();
#endif /* sbSEND_COMPLETED */
#ifndef sbSEND_COMPLETE_FROM_ISR
#define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \
{ \
UBaseType_t uxSavedInterruptStatus; \
\
uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \
( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, \
( uint32_t ) 0, \
eNoAction, \
pxHigherPriorityTaskWoken ); \
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \
} \
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
}
#endif /* sbSEND_COMPLETE_FROM_ISR */
/*lint -restore (9026) */
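/* As the comments above note, all four sb* macros are designed to be
overridden.  A hypothetical override sketch - vApplicationSendCompletedHook()
is an application supplied function, and the #define would live in
FreeRTOSConfig.h so it is seen before the defaults above are applied: */
#if 0
#define sbSEND_COMPLETED( pxStreamBuffer ) vApplicationSendCompletedHook( ( pxStreamBuffer ) )
#endif /* 0 */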
/* The number of bytes used to hold the length of a message in the buffer. */
#define sbBYTES_TO_STORE_MESSAGE_LENGTH ( sizeof( size_t ) )
/* Bits stored in the ucFlags field of the stream buffer. */
#define sbFLAGS_IS_MESSAGE_BUFFER ( ( uint8_t ) 1 ) /* Set if the stream buffer was created as a message buffer, in which case it holds discrete messages rather than a stream. */
#define sbFLAGS_IS_STATICALLY_ALLOCATED ( ( uint8_t ) 2 ) /* Set if the stream buffer was created using statically allocated memory. */
/*-----------------------------------------------------------*/
/* Structure that holds state information on the buffer. */
typedef struct xSTREAM_BUFFER /*lint !e9058 Style convention uses tag. */
{
volatile size_t xTail; /* Index to the next item to read within the buffer. */
volatile size_t xHead; /* Index to the next item to write within the buffer. */
size_t xLength; /* The length of the buffer pointed to by pucBuffer. */
size_t xTriggerLevelBytes; /* The number of bytes that must be in the stream buffer before a task that is waiting for data is unblocked. */
volatile TaskHandle_t xTaskWaitingToReceive; /* Holds the handle of a task waiting for data, or NULL if no tasks are waiting. */
volatile TaskHandle_t xTaskWaitingToSend; /* Holds the handle of a task waiting to send data to a message buffer that is full. */
uint8_t *pucBuffer; /* Points to the buffer itself - that is - the RAM that stores the data passed through the buffer. */
uint8_t ucFlags;
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */
#endif
} StreamBuffer_t;
/*
* The number of bytes available to be read from the buffer.
*/
static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION;
/*
* Add xCount bytes from pucData into the pxStreamBuffer message buffer.
* Returns the number of bytes written, which will either equal xCount in the
* success case, or 0 if there was not enough space in the buffer (in which case
* no data is written into the buffer).
*/
static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) PRIVILEGED_FUNCTION;
/*
* If the stream buffer is being used as a message buffer, then reads an entire
* message out of the buffer. If the stream buffer is being used as a stream
* buffer, then reads as many bytes as possible from the buffer.
* prvReadBytesFromBuffer() is called to actually extract the bytes from the
* buffer's data storage area.
*/
static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
size_t xBytesAvailable,
size_t xBytesToStoreMessageLength ) PRIVILEGED_FUNCTION;
/*
* If the stream buffer is being used as a message buffer, then writes an entire
* message to the buffer. If the stream buffer is being used as a stream
* buffer, then writes as many bytes as possible to the buffer.
* prvWriteBytesToBuffer() is called to actually send the bytes to the buffer's
* data storage area.
*/
static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer,
const void * pvTxData,
size_t xDataLengthBytes,
size_t xSpace,
size_t xRequiredSpace ) PRIVILEGED_FUNCTION;
/*
* Read xMaxCount bytes from the pxStreamBuffer message buffer and write them
* to pucData.
*/
static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer,
uint8_t *pucData,
size_t xMaxCount,
size_t xBytesAvailable ) PRIVILEGED_FUNCTION;
/*
* Called by both pxStreamBufferCreate() and pxStreamBufferCreateStatic() to
* initialise the members of the newly created stream buffer structure.
*/
static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
uint8_t * const pucBuffer,
size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
BaseType_t xIsMessageBuffer ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------*/
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer )
{
uint8_t *pucAllocatedMemory;
/* In case the stream buffer is going to be used as a message buffer
(that is, it will hold discrete messages with a little meta data that
says how big the next message is) check the buffer will be large enough
to hold at least one message. */
configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH );
configASSERT( xTriggerLevelBytes <= xBufferSizeBytes );
/* A trigger level of 0 would cause a waiting task to unblock even when
the buffer was empty. */
if( xTriggerLevelBytes == ( size_t ) 0 )
{
xTriggerLevelBytes = ( size_t ) 1; /*lint !e9044 Parameter modified to ensure it doesn't have a dangerous value. */
}
/* A stream buffer requires a StreamBuffer_t structure and a buffer.
Both are allocated in a single call to pvPortMalloc().  The
StreamBuffer_t structure is placed at the start of the allocated memory
and the buffer follows immediately after.  The requested size is
incremented by one byte so the free space is reported as the user would
expect: the implementation always keeps one byte unused (to distinguish
a full buffer from an empty one), so without the increment the reported
free space would be one byte smaller than logically expected. */
xBufferSizeBytes++;
pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */
if( pucAllocatedMemory != NULL )
{
prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */
pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */
xBufferSizeBytes,
xTriggerLevelBytes,
xIsMessageBuffer );
traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer );
}
else
{
traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer );
}
return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
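/* Applications normally reach this function through the
xStreamBufferCreate() and xMessageBufferCreate() macros (in stream_buffer.h
and message_buffer.h respectively) rather than calling it directly.  A
minimal creation sketch (excluded from the build): */
#if 0
void vCreateStreamBuffer( void )
{
StreamBufferHandle_t xStreamBuffer;
const size_t xBufferSizeBytes = 100;
/* Readers blocked on the buffer unblock once 10 bytes are available. */
const size_t xTriggerLevelBytes = 10;

	xStreamBuffer = xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes );

	if( xStreamBuffer == NULL )
	{
		/* There was not enough heap for the StreamBuffer_t structure plus
		the storage area. */
	}
}
#endif /* 0 */
/*-----------------------------------------------------------*/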
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
BaseType_t xIsMessageBuffer,
uint8_t * const pucStreamBufferStorageArea,
StaticStreamBuffer_t * const pxStaticStreamBuffer )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) pxStaticStreamBuffer; /*lint !e740 !e9087 Safe cast as StaticStreamBuffer_t is opaque Streambuffer_t. */
StreamBufferHandle_t xReturn;
configASSERT( pucStreamBufferStorageArea );
configASSERT( pxStaticStreamBuffer );
configASSERT( xTriggerLevelBytes <= xBufferSizeBytes );
/* A trigger level of 0 would cause a waiting task to unblock even when
the buffer was empty. */
if( xTriggerLevelBytes == ( size_t ) 0 )
{
xTriggerLevelBytes = ( size_t ) 1; /*lint !e9044 Function parameter deliberately modified to ensure it is in range. */
}
/* In case the stream buffer is going to be used as a message buffer
(that is, it will hold discrete messages with a little meta data that
says how big the next message is) check the buffer will be large enough
to hold at least one message. */
configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH );
#if( configASSERT_DEFINED == 1 )
{
/* Sanity check that the size of the structure used to declare a
variable of type StaticStreamBuffer_t equals the size of the real
message buffer structure. */
volatile size_t xSize = sizeof( StaticStreamBuffer_t );
configASSERT( xSize == sizeof( StreamBuffer_t ) );
}
#endif /* configASSERT_DEFINED */
if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) )
{
prvInitialiseNewStreamBuffer( pxStreamBuffer,
pucStreamBufferStorageArea,
xBufferSizeBytes,
xTriggerLevelBytes,
xIsMessageBuffer );
/* Remember this was statically allocated in case it is ever deleted
again. */
pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED;
traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer );
xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */
}
else
{
xReturn = NULL;
traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer );
}
return xReturn;
}
#endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
/*-----------------------------------------------------------*/
void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer )
{
StreamBuffer_t * pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
configASSERT( pxStreamBuffer );
traceSTREAM_BUFFER_DELETE( xStreamBuffer );
if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) pdFALSE )
{
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
{
/* Both the structure and the buffer were allocated using a single call
to pvPortMalloc(), hence only one call to vPortFree() is required. */
vPortFree( ( void * ) pxStreamBuffer ); /*lint !e9087 Standard free() semantics require void *, plus pxStreamBuffer was allocated by pvPortMalloc(). */
}
#else
{
/* Should not be possible to get here, ucFlags must be corrupt.
Force an assert. */
configASSERT( xStreamBuffer == ( StreamBufferHandle_t ) ~0 );
}
#endif
}
else
{
/* The structure and buffer were not allocated dynamically and cannot be
freed - just scrub the structure so future use will assert. */
memset( pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) );
}
}
/*-----------------------------------------------------------*/
BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
BaseType_t xReturn = pdFAIL, xIsMessageBuffer;
#if( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxStreamBufferNumber;
#endif
configASSERT( pxStreamBuffer );
#if( configUSE_TRACE_FACILITY == 1 )
{
/* Store the stream buffer number so it can be restored after the
reset. */
uxStreamBufferNumber = pxStreamBuffer->uxStreamBufferNumber;
}
#endif
/* Can only reset a message buffer if there are no tasks blocked on it. */
if( pxStreamBuffer->xTaskWaitingToReceive == NULL )
{
if( pxStreamBuffer->xTaskWaitingToSend == NULL )
{
if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 )
{
xIsMessageBuffer = pdTRUE;
}
else
{
xIsMessageBuffer = pdFALSE;
}
prvInitialiseNewStreamBuffer( pxStreamBuffer,
pxStreamBuffer->pucBuffer,
pxStreamBuffer->xLength,
pxStreamBuffer->xTriggerLevelBytes,
xIsMessageBuffer );
xReturn = pdPASS;
#if( configUSE_TRACE_FACILITY == 1 )
{
pxStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber;
}
#endif
traceSTREAM_BUFFER_RESET( xStreamBuffer );
}
}
return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
BaseType_t xReturn;
configASSERT( pxStreamBuffer );
/* It is not valid for the trigger level to be 0. */
if( xTriggerLevel == ( size_t ) 0 )
{
xTriggerLevel = ( size_t ) 1; /*lint !e9044 Parameter modified to ensure it doesn't have a dangerous value. */
}
/* The trigger level is the number of bytes that must be in the stream
buffer before a task that is waiting for data is unblocked. */
if( xTriggerLevel <= pxStreamBuffer->xLength )
{
pxStreamBuffer->xTriggerLevelBytes = xTriggerLevel;
xReturn = pdPASS;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
}
/*-----------------------------------------------------------*/
size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer )
{
const StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
size_t xSpace;
configASSERT( pxStreamBuffer );
xSpace = pxStreamBuffer->xLength + pxStreamBuffer->xTail;
xSpace -= pxStreamBuffer->xHead;
xSpace -= ( size_t ) 1;
if( xSpace >= pxStreamBuffer->xLength )
{
xSpace -= pxStreamBuffer->xLength;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xSpace;
}
/*-----------------------------------------------------------*/
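/* Worked example of the calculation above: with xLength = 10, xTail = 2
and xHead = 7 the intermediate value is 10 + 2 - 7 - 1 = 4, which is less
than xLength so needs no adjustment.  The "- 1" exists because one byte is
always kept unused, ensuring xHead == xTail unambiguously means the buffer
is empty rather than full. */
/*-----------------------------------------------------------*/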
size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer )
{
const StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
size_t xReturn;
configASSERT( pxStreamBuffer );
xReturn = prvBytesInBuffer( pxStreamBuffer );
return xReturn;
}
/*-----------------------------------------------------------*/
size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
const void *pvTxData,
size_t xDataLengthBytes,
TickType_t xTicksToWait )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
size_t xReturn, xSpace = 0;
size_t xRequiredSpace = xDataLengthBytes;
TimeOut_t xTimeOut;
configASSERT( pvTxData );
configASSERT( pxStreamBuffer );
/* This send function is used to write to both message buffers and stream
buffers. If this is a message buffer then the space needed must be
increased by the amount of bytes needed to store the length of the
message. */
if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 )
{
xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xTicksToWait != ( TickType_t ) 0 )
{
vTaskSetTimeOutState( &xTimeOut );
do
{
/* Wait until the required number of bytes are free in the message
buffer. */
taskENTER_CRITICAL();
{
xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );
if( xSpace < xRequiredSpace )
{
/* Clear notification state as going to wait for space. */
( void ) xTaskNotifyStateClear( NULL );
/* Should only be one writer. */
configASSERT( pxStreamBuffer->xTaskWaitingToSend == NULL );
pxStreamBuffer->xTaskWaitingToSend = xTaskGetCurrentTaskHandle();
}
else
{
taskEXIT_CRITICAL();
break;
}
}
taskEXIT_CRITICAL();
traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
( void ) xTaskNotifyWait( ( uint32_t ) 0, UINT32_MAX, NULL, xTicksToWait );
pxStreamBuffer->xTaskWaitingToSend = NULL;
} while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xSpace == ( size_t ) 0 )
{
xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace );
if( xReturn > ( size_t ) 0 )
{
traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn );
/* Was a task waiting for the data? */
if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes )
{
sbSEND_COMPLETED( pxStreamBuffer );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer );
}
return xReturn;
}
/*-----------------------------------------------------------*/
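/* A minimal sending sketch (excluded from the build), assuming
xStreamBuffer is a handle created elsewhere.  If there is insufficient
space the call blocks for up to 100ms; a stream buffer then writes as many
bytes as fit, whereas a message buffer writes either the whole message or
nothing. */
#if 0
void vSenderTask( void *pvParameters )
{
static const char pcStringToSend[] = "Hello";
size_t xBytesSent;

	( void ) pvParameters;

	for( ;; )
	{
		xBytesSent = xStreamBufferSend( xStreamBuffer,
										( const void * ) pcStringToSend,
										strlen( pcStringToSend ),
										pdMS_TO_TICKS( 100 ) );

		if( xBytesSent != strlen( pcStringToSend ) )
		{
			/* The call timed out before every byte could be written. */
		}
	}
}
#endif /* 0 */
/*-----------------------------------------------------------*/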
size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
const void *pvTxData,
size_t xDataLengthBytes,
BaseType_t * const pxHigherPriorityTaskWoken )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
size_t xReturn, xSpace;
size_t xRequiredSpace = xDataLengthBytes;
configASSERT( pvTxData );
configASSERT( pxStreamBuffer );
/* This send function is used to write to both message buffers and stream
buffers. If this is a message buffer then the space needed must be
increased by the amount of bytes needed to store the length of the
message. */
if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 )
{
xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );
xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace );
if( xReturn > ( size_t ) 0 )
{
/* Was a task waiting for the data? */
if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes )
{
sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn );
return xReturn;
}
/*-----------------------------------------------------------*/
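/* A minimal ISR-side sketch (excluded from the build).
xHigherPriorityTaskWoken must be initialised to pdFALSE; it is set to
pdTRUE if the send unblocks a task with a priority above the interrupted
task, in which case the port's yield-from-ISR macro requests a context
switch on interrupt exit. */
#if 0
void vRxInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
static const char cRxedByte = 'x'; /* Hypothetical received byte. */

	( void ) xStreamBufferSendFromISR( xStreamBuffer,
									   ( const void * ) &cRxedByte,
									   sizeof( cRxedByte ),
									   &xHigherPriorityTaskWoken );

	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif /* 0 */
/*-----------------------------------------------------------*/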
static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer,
const void * pvTxData,
size_t xDataLengthBytes,
size_t xSpace,
size_t xRequiredSpace )
{
BaseType_t xShouldWrite;
size_t xReturn;
if( xSpace == ( size_t ) 0 )
{
/* Doesn't matter if this is a stream buffer or a message buffer, there
is no space to write. */
xShouldWrite = pdFALSE;
}
else if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) == ( uint8_t ) 0 )
{
/* This is a stream buffer, as opposed to a message buffer, so writing a
stream of bytes rather than discrete messages. Write as many bytes as
possible. */
xShouldWrite = pdTRUE;
xDataLengthBytes = configMIN( xDataLengthBytes, xSpace ); /*lint !e9044 Function parameter modified to ensure it is capped to available space. */
}
else if( xSpace >= xRequiredSpace )
{
/* This is a message buffer, as opposed to a stream buffer, and there
is enough space to write both the message length and the message itself
into the buffer. Start by writing the length of the data, the data
itself will be written later in this function. */
xShouldWrite = pdTRUE;
( void ) prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) &( xDataLengthBytes ), sbBYTES_TO_STORE_MESSAGE_LENGTH );
}
else
{
/* This is a message buffer and there is space available, but not
enough to hold both the message and the bytes that store its length. */
xShouldWrite = pdFALSE;
}
if( xShouldWrite != pdFALSE )
{
/* Writes the data itself. */
xReturn = prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) pvTxData, xDataLengthBytes ); /*lint !e9079 Storage buffer is implemented as uint8_t for ease of sizing, alignment and access. */
}
else
{
xReturn = 0;
}
return xReturn;
}
/*-----------------------------------------------------------*/
size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
TickType_t xTicksToWait )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength;
configASSERT( pvRxData );
configASSERT( pxStreamBuffer );
/* This receive function is used by both message buffers, which store
discrete messages, and stream buffers, which store a continuous stream of
bytes. Discrete messages include an additional
sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the
message. */
if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 )
{
xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH;
}
else
{
xBytesToStoreMessageLength = 0;
}
if( xTicksToWait != ( TickType_t ) 0 )
{
/* Checking if there is data and clearing the notification state must be
performed atomically. */
taskENTER_CRITICAL();
{
xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
/* If this function was invoked by a message buffer read then
xBytesToStoreMessageLength holds the number of bytes used to hold
the length of the next discrete message. If this function was
invoked by a stream buffer read then xBytesToStoreMessageLength will
be 0. */
if( xBytesAvailable <= xBytesToStoreMessageLength )
{
/* Clear notification state as going to wait for data. */
( void ) xTaskNotifyStateClear( NULL );
/* Should only be one reader. */
configASSERT( pxStreamBuffer->xTaskWaitingToReceive == NULL );
pxStreamBuffer->xTaskWaitingToReceive = xTaskGetCurrentTaskHandle();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
if( xBytesAvailable <= xBytesToStoreMessageLength )
{
/* Wait for data to be available. */
traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer );
( void ) xTaskNotifyWait( ( uint32_t ) 0, UINT32_MAX, NULL, xTicksToWait );
pxStreamBuffer->xTaskWaitingToReceive = NULL;
/* Recheck the data available after blocking. */
xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
}
/* Whether receiving a discrete message (where xBytesToStoreMessageLength
holds the number of bytes used to store the message length) or a stream of
bytes (where xBytesToStoreMessageLength is zero), the number of bytes
available must be greater than xBytesToStoreMessageLength to be able to
read bytes from the buffer. */
if( xBytesAvailable > xBytesToStoreMessageLength )
{
xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength );
/* Was a task waiting for space in the buffer? */
if( xReceivedLength != ( size_t ) 0 )
{
traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength );
sbRECEIVE_COMPLETED( pxStreamBuffer );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer );
mtCOVERAGE_TEST_MARKER();
}
return xReceivedLength;
}
/*-----------------------------------------------------------*/
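/* A minimal receiving sketch (excluded from the build).  The call blocks
for up to 20ms waiting for the trigger level to be reached, then returns
up to sizeof( ucRxData ) bytes; when reading from a message buffer the
whole next message must fit in the supplied buffer or nothing is
returned. */
#if 0
void vReceiverTask( void *pvParameters )
{
uint8_t ucRxData[ 20 ];
size_t xReceivedBytes;

	( void ) pvParameters;

	for( ;; )
	{
		xReceivedBytes = xStreamBufferReceive( xStreamBuffer,
											   ( void * ) ucRxData,
											   sizeof( ucRxData ),
											   pdMS_TO_TICKS( 20 ) );

		if( xReceivedBytes > ( size_t ) 0 )
		{
			/* Process the xReceivedBytes bytes now held in ucRxData. */
		}
	}
}
#endif /* 0 */
/*-----------------------------------------------------------*/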
size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
BaseType_t * const pxHigherPriorityTaskWoken )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength;
configASSERT( pvRxData );
configASSERT( pxStreamBuffer );
/* This receive function is used by both message buffers, which store
discrete messages, and stream buffers, which store a continuous stream of
bytes. Discrete messages include an additional
sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the
message. */
if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 )
{
xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH;
}
else
{
xBytesToStoreMessageLength = 0;
}
xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
/* Whether receiving a discrete message (where xBytesToStoreMessageLength
holds the number of bytes used to store the message length) or a stream of
bytes (where xBytesToStoreMessageLength is zero), the number of bytes
available must be greater than xBytesToStoreMessageLength to be able to
read bytes from the buffer. */
if( xBytesAvailable > xBytesToStoreMessageLength )
{
xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength );
/* Was a task waiting for space in the buffer? */
if( xReceivedLength != ( size_t ) 0 )
{
sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength );
return xReceivedLength;
}
/*-----------------------------------------------------------*/
static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer,
void *pvRxData,
size_t xBufferLengthBytes,
size_t xBytesAvailable,
size_t xBytesToStoreMessageLength )
{
size_t xOriginalTail, xReceivedLength, xNextMessageLength;
if( xBytesToStoreMessageLength != ( size_t ) 0 )
{
/* A discrete message is being received. First receive the length
of the message. A copy of the tail is stored so the buffer can be
returned to its prior state if the length of the message is too
large for the provided buffer. */
xOriginalTail = pxStreamBuffer->xTail;
( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xNextMessageLength, xBytesToStoreMessageLength, xBytesAvailable );
/* Reduce the number of bytes available by the number of bytes just
read out. */
xBytesAvailable -= xBytesToStoreMessageLength;
/* Check there is enough space in the buffer provided by the
user. */
if( xNextMessageLength > xBufferLengthBytes )
{
/* The user has provided insufficient space to read the message
so return the buffer to its previous state (so the length of
the message is in the buffer again). */
pxStreamBuffer->xTail = xOriginalTail;
xNextMessageLength = 0;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* A stream of bytes is being received (as opposed to a discrete
message), so read as many bytes as possible. */
xNextMessageLength = xBufferLengthBytes;
}
/* Read the actual data. */
xReceivedLength = prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) pvRxData, xNextMessageLength, xBytesAvailable ); /*lint !e9079 Data storage area is implemented as uint8_t array for ease of sizing, indexing and alignment. */
return xReceivedLength;
}
/*-----------------------------------------------------------*/
BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer )
{
const StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
BaseType_t xReturn;
size_t xTail;
configASSERT( pxStreamBuffer );
/* True if no bytes are available. */
xTail = pxStreamBuffer->xTail;
if( pxStreamBuffer->xHead == xTail )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer )
{
BaseType_t xReturn;
size_t xBytesToStoreMessageLength;
const StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
configASSERT( pxStreamBuffer );
/* This generic version of the receive function is used by both message
buffers, which store discrete messages, and stream buffers, which store a
continuous stream of bytes. Discrete messages include an additional
sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the message. */
if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 )
{
xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH;
}
else
{
xBytesToStoreMessageLength = 0;
}
/* True if the available space equals zero. */
if( xStreamBufferSpacesAvailable( xStreamBuffer ) <= xBytesToStoreMessageLength )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
configASSERT( pxStreamBuffer );
uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR();
{
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL )
{
( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive,
( uint32_t ) 0,
eNoAction,
pxHigherPriorityTaskWoken );
( pxStreamBuffer )->xTaskWaitingToReceive = NULL;
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}
/*-----------------------------------------------------------*/
BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken )
{
StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) xStreamBuffer; /*lint !e9087 !e9079 Safe cast as StreamBufferHandle_t is opaque Streambuffer_t. */
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
configASSERT( pxStreamBuffer );
uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR();
{
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )
{
( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend,
( uint32_t ) 0,
eNoAction,
pxHigherPriorityTaskWoken );
( pxStreamBuffer )->xTaskWaitingToSend = NULL;
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}
/*-----------------------------------------------------------*/
static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount )
{
size_t xNextHead, xFirstLength;
configASSERT( xCount > ( size_t ) 0 );
xNextHead = pxStreamBuffer->xHead;
/* Calculate the number of bytes that can be added in the first write -
which may be less than the total number of bytes that need to be added if
the buffer will wrap back to the beginning. */
xFirstLength = configMIN( pxStreamBuffer->xLength - xNextHead, xCount );
/* Write as many bytes as can be written in the first write. */
configASSERT( ( xNextHead + xFirstLength ) <= pxStreamBuffer->xLength );
memcpy( ( void * ) ( &( pxStreamBuffer->pucBuffer[ xNextHead ] ) ), ( const void * ) pucData, xFirstLength ); /*lint !e9087 memcpy() requires void *. */
/* If the number of bytes written was less than the number that could be
written in the first write... */
if( xCount > xFirstLength )
{
/* ...then write the remaining bytes to the start of the buffer. */
configASSERT( ( xCount - xFirstLength ) <= pxStreamBuffer->xLength );
memcpy( ( void * ) pxStreamBuffer->pucBuffer, ( const void * ) &( pucData[ xFirstLength ] ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
xNextHead += xCount;
if( xNextHead >= pxStreamBuffer->xLength )
{
xNextHead -= pxStreamBuffer->xLength;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
pxStreamBuffer->xHead = xNextHead;
return xCount;
}
/*-----------------------------------------------------------*/
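/* Worked example of the two part copy above: with xLength = 10, xHead = 8
and xCount = 5, xFirstLength = configMIN( 10 - 8, 5 ) = 2, so the first
two bytes of pucData land at indices 8 and 9, the remaining three bytes
wrap to indices 0 to 2, and xHead becomes ( 8 + 5 ) - 10 = 3. */
/*-----------------------------------------------------------*/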
static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, uint8_t *pucData, size_t xMaxCount, size_t xBytesAvailable )
{
size_t xCount, xFirstLength, xNextTail;
/* Use the minimum of the wanted bytes and the available bytes. */
xCount = configMIN( xBytesAvailable, xMaxCount );
if( xCount > ( size_t ) 0 )
{
xNextTail = pxStreamBuffer->xTail;
/* Calculate the number of bytes that can be read - which may be
less than the number wanted if the data wraps around to the start of
the buffer. */
xFirstLength = configMIN( pxStreamBuffer->xLength - xNextTail, xCount );
/* Obtain the number of bytes it is possible to obtain in the first
read. Asserts check bounds of read and write. */
configASSERT( xFirstLength <= xMaxCount );
configASSERT( ( xNextTail + xFirstLength ) <= pxStreamBuffer->xLength );
memcpy( ( void * ) pucData, ( const void * ) &( pxStreamBuffer->pucBuffer[ xNextTail ] ), xFirstLength ); /*lint !e9087 memcpy() requires void *. */
/* If the total number of wanted bytes is greater than the number
that could be read in the first read... */
if( xCount > xFirstLength )
{
/*...then read the remaining bytes from the start of the buffer. */
configASSERT( xCount <= xMaxCount );
memcpy( ( void * ) &( pucData[ xFirstLength ] ), ( void * ) ( pxStreamBuffer->pucBuffer ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Move the tail pointer to effectively remove the data read from
the buffer. */
xNextTail += xCount;
if( xNextTail >= pxStreamBuffer->xLength )
{
xNextTail -= pxStreamBuffer->xLength;
}
pxStreamBuffer->xTail = xNextTail;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xCount;
}
/*-----------------------------------------------------------*/
static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer )
{
/* Returns the distance between xTail and xHead. */
size_t xCount;
xCount = pxStreamBuffer->xLength + pxStreamBuffer->xHead;
xCount -= pxStreamBuffer->xTail;
if( xCount >= pxStreamBuffer->xLength )
{
xCount -= pxStreamBuffer->xLength;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
return xCount;
}
/*-----------------------------------------------------------*/
static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
uint8_t * const pucBuffer,
size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
BaseType_t xIsMessageBuffer )
{
/* The assert below deliberately writes to the entire buffer, both to ensure
it can be written to without generating exceptions and to set the memory to
a known value that assists development/debugging. */
#if( configASSERT_DEFINED == 1 )
{
/* The value written just has to be identifiable when looking at the
memory. Don't use 0xA5 as that is the stack fill value and could
result in confusion as to what is actually being observed. */
const BaseType_t xWriteValue = 0x55;
configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer );
}
#endif
memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */
pxStreamBuffer->pucBuffer = pucBuffer;
pxStreamBuffer->xLength = xBufferSizeBytes;
pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes;
if( xIsMessageBuffer != pdFALSE )
{
pxStreamBuffer->ucFlags |= sbFLAGS_IS_MESSAGE_BUFFER;
}
}
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer )
{
return ( ( StreamBuffer_t * ) xStreamBuffer )->uxStreamBufferNumber;
}
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )
void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber )
{
( ( StreamBuffer_t * ) xStreamBuffer )->uxStreamBufferNumber = uxStreamBufferNumber;
}
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )
uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer )
{
return ( ( StreamBuffer_t * ) xStreamBuffer )->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER;
}
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/