[FL-1326] Move heap4 allocator to furi core and add allocation tracing capability (#529)
* Move heap memory allocator to furi core
* Newlibc: add malloc guards
* Furi: add CMSIS thread id getter
* Core: add tracing capability to heap allocator, add heap tracing support to furi thread, add heap trace support to app-loader
This commit is contained in:

parent 6ec9c6cc49
commit 359bbdfe69
							
								
								
									
applications/app-loader/app-loader.c (9 changed lines, Executable file → Normal file)
@@ -87,9 +87,13 @@ bool app_loader_start(const char* name, const char* args) {
     return furi_thread_start(state.thread);
 }
 
-void app_loader_thread_state_callback(FuriThreadState state, void* context) {
+void app_loader_thread_state_callback(FuriThreadState thread_state, void* context) {
     furi_assert(context);
-    if(state == FuriThreadStateStopped) {
+    if(thread_state == FuriThreadStateStopped) {
+        FURI_LOG_I(
+            APP_LOADER_TAG,
+            "Application thread stopped, heap leaked: %d",
+            furi_thread_get_heap_size(state.thread));
         api_hal_power_insomnia_exit();
     }
 }
@@ -97,6 +101,7 @@ void app_loader_thread_state_callback(FuriThreadState state, void* context) {
 int32_t app_loader(void* p) {
     FURI_LOG_I(APP_LOADER_TAG, "Starting");
     state.thread = furi_thread_alloc();
+    furi_thread_enable_heap_trace(state.thread);
     furi_thread_set_state_context(state.thread, &state);
     furi_thread_set_state_callback(state.thread, app_loader_thread_state_callback);
     string_init(state.args);

@@ -5,6 +5,7 @@
 #include <furi/common_defines.h>
 #include <furi/check.h>
 #include <furi/memmgr.h>
+#include <furi/memmgr_heap.h>
 #include <furi/pubsub.h>
 #include <furi/record.h>
 #include <furi/stdglue.h>
							
								
								
									
core/furi/memmgr_heap.c (new file, 481 lines)
@@ -0,0 +1,481 @@
/*
 * FreeRTOS Kernel V10.2.1
 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */

/*
 * A sample implementation of pvPortMalloc() and vPortFree() that combines
 * (coalesces) adjacent memory blocks as they are freed, and in so doing
 * limits memory fragmentation.
 *
 * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
 * memory management pages of http://www.FreeRTOS.org for more information.
 */

#include "memmgr_heap.h"
#include "check.h"
#include <stdlib.h>
#include <cmsis_os2.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"

#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#if(configSUPPORT_DYNAMIC_ALLOCATION == 0)
#error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
#endif

/* Block sizes must not get too small. */
#define heapMINIMUM_BLOCK_SIZE ((size_t)(xHeapStructSize << 1))

/* Assumes 8bit bytes! */
#define heapBITS_PER_BYTE ((size_t)8)

/* Allocate the memory for the heap. */
#if(configAPPLICATION_ALLOCATED_HEAP == 1)
/* The application writer has already defined the array used for the RTOS
    heap - probably so it can be placed in a special segment or address. */
extern uint8_t ucHeap[configTOTAL_HEAP_SIZE];
#else
static uint8_t ucHeap[configTOTAL_HEAP_SIZE];
#endif /* configAPPLICATION_ALLOCATED_HEAP */

/* Define the linked list structure.  This is used to link free blocks in order
of their memory address. */
typedef struct A_BLOCK_LINK {
    struct A_BLOCK_LINK* pxNextFreeBlock; /*<< The next free block in the list. */
    size_t xBlockSize; /*<< The size of the free block. */
} BlockLink_t;

/*-----------------------------------------------------------*/

/*
 * Inserts a block of memory that is being freed into the correct position in
 * the list of free memory blocks.  The block being freed will be merged with
 * the block in front of it and/or the block behind it if the memory blocks are
 * adjacent to each other.
 */
static void prvInsertBlockIntoFreeList(BlockLink_t* pxBlockToInsert);

/*
 * Called automatically to setup the required heap structures the first time
 * pvPortMalloc() is called.
 */
static void prvHeapInit(void);

/*-----------------------------------------------------------*/

/* The size of the structure placed at the beginning of each allocated memory
block must be correctly byte aligned. */
static const size_t xHeapStructSize = (sizeof(BlockLink_t) + ((size_t)(portBYTE_ALIGNMENT - 1))) &
                                      ~((size_t)portBYTE_ALIGNMENT_MASK);

/* Create a couple of list links to mark the start and end of the list. */
static BlockLink_t xStart, *pxEnd = NULL;

/* Keeps track of the number of free bytes remaining, but says nothing about
fragmentation. */
static size_t xFreeBytesRemaining = 0U;
static size_t xMinimumEverFreeBytesRemaining = 0U;

/* Gets set to the top bit of a size_t type.  When this bit in the xBlockSize
member of a BlockLink_t structure is set then the block belongs to the
application.  When the bit is free the block is still part of the free heap
space. */
static size_t xBlockAllocatedBit = 0;

/* Furi heap extension */
#include <m-dict.h>

/* Allocation tracking types */
DICT_DEF2(MemmgrHeapAllocDict, uint32_t, uint32_t)
DICT_DEF2(
    MemmgrHeapThreadDict,
    uint32_t,
    M_DEFAULT_OPLIST,
    MemmgrHeapAllocDict_t,
    DICT_OPLIST(MemmgrHeapAllocDict))

/* Thread allocation tracing storage */
static MemmgrHeapThreadDict_t memmgr_heap_thread_dict = {0};
static volatile uint32_t memmgr_heap_thread_trace_depth = 0;

/* Initialize tracing storage on start */
void memmgr_heap_init() {
    MemmgrHeapThreadDict_init(memmgr_heap_thread_dict);
}

void memmgr_heap_enable_thread_trace(osThreadId_t thread_id) {
    vTaskSuspendAll();
    {
        memmgr_heap_thread_trace_depth++;
        furi_assert(
            MemmgrHeapThreadDict_get(memmgr_heap_thread_dict, (uint32_t)thread_id) == NULL);
        MemmgrHeapAllocDict_t alloc_dict;
        MemmgrHeapAllocDict_init(alloc_dict);
        MemmgrHeapThreadDict_set_at(memmgr_heap_thread_dict, (uint32_t)thread_id, alloc_dict);
        memmgr_heap_thread_trace_depth--;
    }
    (void)xTaskResumeAll();
}

void memmgr_heap_disable_thread_trace(osThreadId_t thread_id) {
    vTaskSuspendAll();
    {
        memmgr_heap_thread_trace_depth++;
        furi_assert(
            MemmgrHeapThreadDict_get(memmgr_heap_thread_dict, (uint32_t)thread_id) != NULL);
        MemmgrHeapThreadDict_erase(memmgr_heap_thread_dict, (uint32_t)thread_id);
        memmgr_heap_thread_trace_depth--;
    }
    (void)xTaskResumeAll();
}

size_t memmgr_heap_get_thread_memory(osThreadId_t thread_id) {
    size_t leftovers = 0;
    vTaskSuspendAll();
    {
        memmgr_heap_thread_trace_depth++;
        MemmgrHeapAllocDict_t* alloc_dict =
            MemmgrHeapThreadDict_get(memmgr_heap_thread_dict, (uint32_t)thread_id);
        MemmgrHeapAllocDict_it_t alloc_dict_it;
        for(MemmgrHeapAllocDict_it(alloc_dict_it, *alloc_dict);
            !MemmgrHeapAllocDict_end_p(alloc_dict_it);
            MemmgrHeapAllocDict_next(alloc_dict_it)) {
            MemmgrHeapAllocDict_itref_t* data = MemmgrHeapAllocDict_ref(alloc_dict_it);
            leftovers += data->value;
        }
        memmgr_heap_thread_trace_depth--;
    }
    (void)xTaskResumeAll();
    return leftovers;
}

#undef traceMALLOC
static inline void traceMALLOC(void* pointer, size_t size) {
    osThreadId_t thread_id = osThreadGetId();
    if(thread_id && memmgr_heap_thread_trace_depth == 0) {
        memmgr_heap_thread_trace_depth++;
        MemmgrHeapAllocDict_t* alloc_dict =
            MemmgrHeapThreadDict_get(memmgr_heap_thread_dict, (uint32_t)thread_id);
        if(alloc_dict) {
            MemmgrHeapAllocDict_set_at(*alloc_dict, (uint32_t)pointer, (uint32_t)size);
        }
        memmgr_heap_thread_trace_depth--;
    }
}

#undef traceFREE
static inline void traceFREE(void* pointer, size_t size) {
    osThreadId_t thread_id = osThreadGetId();
    if(thread_id && memmgr_heap_thread_trace_depth == 0) {
        memmgr_heap_thread_trace_depth++;
        MemmgrHeapAllocDict_t* alloc_dict =
            MemmgrHeapThreadDict_get(memmgr_heap_thread_dict, (uint32_t)thread_id);
        if(alloc_dict) {
            MemmgrHeapAllocDict_erase(*alloc_dict, (uint32_t)pointer);
        }
        memmgr_heap_thread_trace_depth--;
    }
}

/*-----------------------------------------------------------*/

void* pvPortMalloc(size_t xWantedSize) {
    BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
    void* pvReturn = NULL;

    vTaskSuspendAll();
    {
        /* If this is the first call to malloc then the heap will require
        initialisation to setup the list of free blocks. */
        if(pxEnd == NULL) {
            prvHeapInit();
            memmgr_heap_init();
        } else {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Check the requested block size is not so large that the top bit is
        set.  The top bit of the block size member of the BlockLink_t structure
        is used to determine who owns the block - the application or the
        kernel, so it must be free. */
        if((xWantedSize & xBlockAllocatedBit) == 0) {
            /* The wanted size is increased so it can contain a BlockLink_t
            structure in addition to the requested amount of bytes. */
            if(xWantedSize > 0) {
                xWantedSize += xHeapStructSize;

                /* Ensure that blocks are always aligned to the required number
                of bytes. */
                if((xWantedSize & portBYTE_ALIGNMENT_MASK) != 0x00) {
                    /* Byte alignment required. */
                    xWantedSize += (portBYTE_ALIGNMENT - (xWantedSize & portBYTE_ALIGNMENT_MASK));
                    configASSERT((xWantedSize & portBYTE_ALIGNMENT_MASK) == 0);
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }

            if((xWantedSize > 0) && (xWantedSize <= xFreeBytesRemaining)) {
                /* Traverse the list from the start (lowest address) block until
                one of adequate size is found. */
                pxPreviousBlock = &xStart;
                pxBlock = xStart.pxNextFreeBlock;
                while((pxBlock->xBlockSize < xWantedSize) && (pxBlock->pxNextFreeBlock != NULL)) {
                    pxPreviousBlock = pxBlock;
                    pxBlock = pxBlock->pxNextFreeBlock;
                }

                /* If the end marker was reached then a block of adequate size
                was not found. */
                if(pxBlock != pxEnd) {
                    /* Return the memory space pointed to - jumping over the
                    BlockLink_t structure at its start. */
                    pvReturn =
                        (void*)(((uint8_t*)pxPreviousBlock->pxNextFreeBlock) + xHeapStructSize);

                    /* This block is being returned for use so must be taken out
                    of the list of free blocks. */
                    pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;

                    /* If the block is larger than required it can be split into
                    two. */
                    if((pxBlock->xBlockSize - xWantedSize) > heapMINIMUM_BLOCK_SIZE) {
                        /* This block is to be split into two.  Create a new
                        block following the number of bytes requested. The void
                        cast is used to prevent byte alignment warnings from the
                        compiler. */
                        pxNewBlockLink = (void*)(((uint8_t*)pxBlock) + xWantedSize);
                        configASSERT((((size_t)pxNewBlockLink) & portBYTE_ALIGNMENT_MASK) == 0);

                        /* Calculate the sizes of two blocks split from the
                        single block. */
                        pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
                        pxBlock->xBlockSize = xWantedSize;

                        /* Insert the new block into the list of free blocks. */
                        prvInsertBlockIntoFreeList(pxNewBlockLink);
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    xFreeBytesRemaining -= pxBlock->xBlockSize;

                    if(xFreeBytesRemaining < xMinimumEverFreeBytesRemaining) {
                        xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
                    } else {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* The block is being returned - it is allocated and owned
                    by the application and has no "next" block. */
                    pxBlock->xBlockSize |= xBlockAllocatedBit;
                    pxBlock->pxNextFreeBlock = NULL;
                } else {
                    mtCOVERAGE_TEST_MARKER();
                }
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            mtCOVERAGE_TEST_MARKER();
        }

        traceMALLOC(pvReturn, xWantedSize);
    }
    (void)xTaskResumeAll();

#if(configUSE_MALLOC_FAILED_HOOK == 1)
    {
        if(pvReturn == NULL) {
            extern void vApplicationMallocFailedHook(void);
            vApplicationMallocFailedHook();
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    }
#endif

    configASSERT((((size_t)pvReturn) & (size_t)portBYTE_ALIGNMENT_MASK) == 0);
    return pvReturn;
}
/*-----------------------------------------------------------*/

void vPortFree(void* pv) {
    uint8_t* puc = (uint8_t*)pv;
    BlockLink_t* pxLink;

    if(pv != NULL) {
        /* The memory being freed will have a BlockLink_t structure immediately
        before it. */
        puc -= xHeapStructSize;

        /* This casting is to keep the compiler from issuing warnings. */
        pxLink = (void*)puc;

        /* Check the block is actually allocated. */
        configASSERT((pxLink->xBlockSize & xBlockAllocatedBit) != 0);
        configASSERT(pxLink->pxNextFreeBlock == NULL);

        if((pxLink->xBlockSize & xBlockAllocatedBit) != 0) {
            if(pxLink->pxNextFreeBlock == NULL) {
                /* The block is being returned to the heap - it is no longer
                allocated. */
                pxLink->xBlockSize &= ~xBlockAllocatedBit;

                vTaskSuspendAll();
                {
                    /* Add this block to the list of free blocks. */
                    xFreeBytesRemaining += pxLink->xBlockSize;
                    traceFREE(pv, pxLink->xBlockSize);
                    prvInsertBlockIntoFreeList(((BlockLink_t*)pxLink));
                }
                (void)xTaskResumeAll();
            } else {
                mtCOVERAGE_TEST_MARKER();
            }
        } else {
            mtCOVERAGE_TEST_MARKER();
        }
    }
}
/*-----------------------------------------------------------*/

size_t xPortGetFreeHeapSize(void) {
    return xFreeBytesRemaining;
}
/*-----------------------------------------------------------*/

size_t xPortGetMinimumEverFreeHeapSize(void) {
    return xMinimumEverFreeBytesRemaining;
}
/*-----------------------------------------------------------*/

void vPortInitialiseBlocks(void) {
    /* This just exists to keep the linker quiet. */
}
/*-----------------------------------------------------------*/

static void prvHeapInit(void) {
    BlockLink_t* pxFirstFreeBlock;
    uint8_t* pucAlignedHeap;
    size_t uxAddress;
    size_t xTotalHeapSize = configTOTAL_HEAP_SIZE;

    /* Ensure the heap starts on a correctly aligned boundary. */
    uxAddress = (size_t)ucHeap;

    if((uxAddress & portBYTE_ALIGNMENT_MASK) != 0) {
        uxAddress += (portBYTE_ALIGNMENT - 1);
        uxAddress &= ~((size_t)portBYTE_ALIGNMENT_MASK);
        xTotalHeapSize -= uxAddress - (size_t)ucHeap;
    }

    pucAlignedHeap = (uint8_t*)uxAddress;

    /* xStart is used to hold a pointer to the first item in the list of free
    blocks.  The void cast is used to prevent compiler warnings. */
    xStart.pxNextFreeBlock = (void*)pucAlignedHeap;
    xStart.xBlockSize = (size_t)0;

    /* pxEnd is used to mark the end of the list of free blocks and is inserted
    at the end of the heap space. */
    uxAddress = ((size_t)pucAlignedHeap) + xTotalHeapSize;
    uxAddress -= xHeapStructSize;
    uxAddress &= ~((size_t)portBYTE_ALIGNMENT_MASK);
    pxEnd = (void*)uxAddress;
    pxEnd->xBlockSize = 0;
    pxEnd->pxNextFreeBlock = NULL;

    /* To start with there is a single free block that is sized to take up the
    entire heap space, minus the space taken by pxEnd. */
    pxFirstFreeBlock = (void*)pucAlignedHeap;
    pxFirstFreeBlock->xBlockSize = uxAddress - (size_t)pxFirstFreeBlock;
    pxFirstFreeBlock->pxNextFreeBlock = pxEnd;

    /* Only one block exists - and it covers the entire usable heap space. */
    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;

    /* Work out the position of the top bit in a size_t variable. */
    xBlockAllocatedBit = ((size_t)1) << ((sizeof(size_t) * heapBITS_PER_BYTE) - 1);
}
/*-----------------------------------------------------------*/

static void prvInsertBlockIntoFreeList(BlockLink_t* pxBlockToInsert) {
    BlockLink_t* pxIterator;
    uint8_t* puc;

    /* Iterate through the list until a block is found that has a higher address
    than the block being inserted. */
    for(pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert;
        pxIterator = pxIterator->pxNextFreeBlock) {
        /* Nothing to do here, just iterate to the right position. */
    }

    /* Do the block being inserted, and the block it is being inserted after
    make a contiguous block of memory? */
    puc = (uint8_t*)pxIterator;
    if((puc + pxIterator->xBlockSize) == (uint8_t*)pxBlockToInsert) {
        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
        pxBlockToInsert = pxIterator;
    } else {
        mtCOVERAGE_TEST_MARKER();
    }

    /* Do the block being inserted, and the block it is being inserted before
    make a contiguous block of memory? */
    puc = (uint8_t*)pxBlockToInsert;
    if((puc + pxBlockToInsert->xBlockSize) == (uint8_t*)pxIterator->pxNextFreeBlock) {
        if(pxIterator->pxNextFreeBlock != pxEnd) {
            /* Form one big block from the two blocks. */
            pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
            pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
        } else {
            pxBlockToInsert->pxNextFreeBlock = pxEnd;
        }
    } else {
        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
    }

    /* If the block being inserted plugged a gap, so was merged with the block
    before and the block after, then its pxNextFreeBlock pointer will have
    already been set, and should not be set here as that would make it point
    to itself. */
    if(pxIterator != pxBlockToInsert) {
        pxIterator->pxNextFreeBlock = pxBlockToInsert;
    } else {
        mtCOVERAGE_TEST_MARKER();
    }
}
							
								
								
									
core/furi/memmgr_heap.h (new file, 28 lines)
@@ -0,0 +1,28 @@
#pragma once

#include <stdint.h>
#include <cmsis_os2.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Memmgr heap enable thread allocation tracking
 * @param thread_id - thread id to track
 */
void memmgr_heap_enable_thread_trace(osThreadId_t thread_id);

/** Memmgr heap disable thread allocation tracking
 * @param thread_id - thread id to stop tracking
 */
void memmgr_heap_disable_thread_trace(osThreadId_t thread_id);

/** Memmgr heap get allocated thread memory
 * @param thread_id - thread id to query
 * @return bytes allocated right now
 */
size_t memmgr_heap_get_thread_memory(osThreadId_t thread_id);

#ifdef __cplusplus
}
#endif
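The header above is the low-level interface, keyed by CMSIS thread id. A minimal usage sketch follows; the `measure_own_leak` helper is hypothetical, and it assumes `malloc()`/`free()` are routed through this allocator (as furi's memmgr wrappers do) and that tracing is not already enabled for the calling thread:

```c
#include <furi/memmgr_heap.h>
#include <cmsis_os2.h>

/* Hypothetical helper, not part of this commit: measure how many bytes the
 * calling thread allocates but does not free while running work(). */
static size_t measure_own_leak(void (*work)(void)) {
    osThreadId_t self = osThreadGetId();

    memmgr_heap_enable_thread_trace(self);
    work();
    /* Anything work() allocated and kept is still recorded in this
     * thread's allocation dictionary at this point. */
    size_t leaked = memmgr_heap_get_thread_memory(self);
    memmgr_heap_disable_thread_trace(self);

    return leaked;
}
```

Note that the reported sizes are the allocator's adjusted block sizes (header plus alignment padding), because traceMALLOC() stores xWantedSize after it has been rounded up.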
@@ -7,6 +7,8 @@
 #include <stdio.h>
 #include <string.h>
 #include <m-dict.h>
+#include "FreeRTOS.h"
+#include "task.h"
 
 extern UART_HandleTypeDef DEBUG_UART;
 
@@ -122,3 +124,11 @@ bool furi_stdglue_set_thread_stdout_callback(FuriStdglueWriteCallback callback)
         return false;
     }
 }
+
+void __malloc_lock(struct _reent* REENT) {
+    vTaskSuspendAll();
+}
+
+void __malloc_unlock(struct _reent* REENT) {
+    xTaskResumeAll();
+}
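The two newlib hooks above are what the commit message calls "malloc guards": newlib invokes `__malloc_lock()`/`__malloc_unlock()` around its allocator internals, possibly recursively, which `vTaskSuspendAll()`/`xTaskResumeAll()` tolerate because they keep a nesting count. A small sketch of that property; the check function is hypothetical and only illustrates the nesting behaviour:

```c
#include "FreeRTOS.h"
#include "task.h"

struct _reent; /* forward declaration is enough for these prototypes */
extern void __malloc_lock(struct _reent* reent);
extern void __malloc_unlock(struct _reent* reent);

/* Hypothetical check, not part of this commit: nested lock/unlock pairs
 * must leave the scheduler running again once fully unwound. */
static void malloc_guard_nesting_check(void) {
    __malloc_lock(NULL);   /* scheduler suspended, depth 1 */
    __malloc_lock(NULL);   /* depth 2 */
    __malloc_unlock(NULL); /* back to depth 1 */
    __malloc_unlock(NULL); /* scheduler running again */
}
```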
@@ -1,5 +1,6 @@
 #include "thread.h"
 #include "memmgr.h"
+#include "memmgr_heap.h"
 #include "check.h"
 
 #include <m-string.h>
@@ -16,6 +17,9 @@ struct FuriThread {
 
     osThreadAttr_t attr;
     osThreadId_t id;
+
+    bool heap_trace_enabled;
+    size_t heap_size;
 };
 
 void furi_thread_set_state(FuriThread* thread, FuriThreadState state) {
@@ -33,8 +37,17 @@ void furi_thread_body(void* context) {
     furi_assert(thread->state == FuriThreadStateStarting);
     furi_thread_set_state(thread, FuriThreadStateRunning);
 
+    if(thread->heap_trace_enabled == true) {
+        memmgr_heap_enable_thread_trace(thread->id);
+    }
+
     thread->ret = thread->callback(thread->context);
 
+    if(thread->heap_trace_enabled == true) {
+        thread->heap_size = memmgr_heap_get_thread_memory(thread->id);
+        memmgr_heap_disable_thread_trace(thread->id);
+    }
+
     furi_assert(thread->state == FuriThreadStateRunning);
     furi_thread_set_state(thread, FuriThreadStateStopped);
 
@@ -125,3 +138,27 @@ osStatus_t furi_thread_join(FuriThread* thread) {
     furi_assert(thread);
     return osThreadJoin(thread->id);
 }
+
+osThreadId_t furi_thread_get_thread_id(FuriThread* thread) {
+    return thread->id;
+}
+
+void furi_thread_enable_heap_trace(FuriThread* thread) {
+    furi_assert(thread);
+    furi_assert(thread->state == FuriThreadStateStopped);
+    furi_assert(thread->heap_trace_enabled == false);
+    thread->heap_trace_enabled = true;
+}
+
+void furi_thread_disable_heap_trace(FuriThread* thread) {
+    furi_assert(thread);
+    furi_assert(thread->state == FuriThreadStateStopped);
+    furi_assert(thread->heap_trace_enabled == true);
+    thread->heap_trace_enabled = false;
+}
+
+size_t furi_thread_get_heap_size(FuriThread* thread) {
+    furi_assert(thread);
+    furi_assert(thread->heap_trace_enabled == true);
+    return thread->heap_size;
+}
@@ -18,91 +18,78 @@ typedef enum {
 /** FuriThread anonymous structure */
 typedef struct FuriThread FuriThread;
 
-/**
- * FuriThreadCallback
+/** FuriThreadCallback
  * Your callback to run in new thread
  * @warning don't use osThreadExit
  */
 typedef int32_t (*FuriThreadCallback)(void* context);
 
-/**
- * FuriThread state change calback
+/** FuriThread state change callback
  * called upon thread state change
  * @param state - new thread state
  * @param context - callback context
  */
 typedef void (*FuriThreadStateCallback)(FuriThreadState state, void* context);
 
-/**
- * Allocate FuriThread
+/** Allocate FuriThread
  * @return FuriThread instance
  */
 FuriThread* furi_thread_alloc();
 
-/**
- * Release FuriThread
+/** Release FuriThread
  * @param thread - FuriThread instance
  */
 void furi_thread_free(FuriThread* thread);
 
-/**
- * Set FuriThread name
+/** Set FuriThread name
  * @param thread - FuriThread instance
  * @param name - string
  */
 void furi_thread_set_name(FuriThread* thread, const char* name);
 
-/**
- * Set FuriThread stack size
+/** Set FuriThread stack size
  * @param thread - FuriThread instance
  * @param stack_size - stack size in bytes
 */
 void furi_thread_set_stack_size(FuriThread* thread, size_t stack_size);
 
-/**
- * Set FuriThread callback
+/** Set FuriThread callback
  * @param thread - FuriThread instance
  * @param callback - FuriThreadCallback, called upon thread run
  */
 void furi_thread_set_callback(FuriThread* thread, FuriThreadCallback callback);
 
-/**
- * Set FuriThread context
+/** Set FuriThread context
  * @param thread - FuriThread instance
  * @param context - pointer to context for thread callback
  */
 void furi_thread_set_context(FuriThread* thread, void* context);
 
-/**
- * Set FuriThread state change callback
+/** Set FuriThread state change callback
  * @param thread - FuriThread instance
  * @param callback - state change callback
  */
 void furi_thread_set_state_callback(FuriThread* thread, FuriThreadStateCallback callback);
 
-/**
- * Set FuriThread state change context
+/** Set FuriThread state change context
  * @param thread - FuriThread instance
  * @param context - pointer to context
  */
 void furi_thread_set_state_context(FuriThread* thread, void* context);
 
-/**
- * Get FuriThread state
+/** Get FuriThread state
  * @param thread - FuriThread instance
  * @return thread state from FuriThreadState
  */
 FuriThreadState furi_thread_get_state(FuriThread* thread);
 
-/**
- * Start FuriThread
+/** Start FuriThread
  * @param thread - FuriThread instance
  * @return true on success
  */
 bool furi_thread_start(FuriThread* thread);
 
-/**
- * Treminate FuriThread
+/** Terminate FuriThread
  * @param thread - FuriThread instance
  * @return osStatus_t
  * @warning terminating stateful thread is dangerous
@@ -110,13 +97,33 @@ bool furi_thread_start(FuriThread* thread);
  */
 osStatus_t furi_thread_terminate(FuriThread* thread);
 
-/**
- * Join FuriThread
+/** Join FuriThread
  * @param thread - FuriThread instance
  * @return osStatus_t
  */
 osStatus_t furi_thread_join(FuriThread* thread);
 
+/** Get CMSIS Thread ID
+ * @param thread - FuriThread instance
+ * @return osThreadId_t or NULL
+ */
+osThreadId_t furi_thread_get_thread_id(FuriThread* thread);
+
+/** Enable heap tracing
+ * @param thread - FuriThread instance
+ */
+void furi_thread_enable_heap_trace(FuriThread* thread);
+
+/** Disable heap tracing
+ * @param thread - FuriThread instance
+ */
+void furi_thread_disable_heap_trace(FuriThread* thread);
+
+/** Get thread heap size
+ * @param thread - FuriThread instance
+ */
+size_t furi_thread_get_heap_size(FuriThread* thread);
+
 #ifdef __cplusplus
 }
 #endif
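For application code the FuriThread-level wrappers above are the intended entry point. A minimal sketch of the full cycle; the worker, its name, the stack size, the `<furi/thread.h>` include path, and the assumption that `malloc()` goes through the traced heap are illustrative, not part of this commit:

```c
#include <furi/thread.h>
#include <cmsis_os2.h>
#include <stdlib.h>

/* Illustrative worker: allocates a buffer and "forgets" to free it. */
static int32_t leaky_worker(void* context) {
    (void)context;
    void* buffer = malloc(64); /* stays attributed to this thread */
    (void)buffer;
    return 0;
}

static size_t run_traced_worker(void) {
    FuriThread* thread = furi_thread_alloc();
    furi_thread_set_name(thread, "leaky_worker");
    furi_thread_set_stack_size(thread, 1024);
    furi_thread_set_callback(thread, leaky_worker);
    furi_thread_enable_heap_trace(thread); /* only valid while stopped */
    furi_thread_start(thread);

    /* Wait for the thread to finish; heap_size is captured in
     * furi_thread_body() just before the state flips to Stopped. */
    while(furi_thread_get_state(thread) != FuriThreadStateStopped) {
        osDelay(10);
    }

    size_t leaked = furi_thread_get_heap_size(thread); /* > 64: includes block overhead */
    furi_thread_free(thread);
    return leaked;
}
```

Tracing must be toggled while the thread is stopped (the setters assert on state), and the recorded heap size only becomes valid once the thread reaches FuriThreadStateStopped, which is why app-loader reads it from the state callback.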
@@ -86,7 +86,6 @@ C_SOURCES		+= \
 	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/tasks.c \
 	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/timers.c \
 	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c \
-	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c \
 	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM4F/port.c \
 	$(CUBE_DIR)/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c \
 	$(CUBE_DIR)/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c \

@@ -86,7 +86,6 @@ C_SOURCES		+= \
 	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/tasks.c \
 	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/timers.c \
 	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c \
-	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c \
 	$(CUBE_DIR)/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM4F/port.c \
 	$(CUBE_DIR)/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_core.c \
 	$(CUBE_DIR)/Middlewares/ST/STM32_USB_Device_Library/Core/Src/usbd_ctlreq.c \
Author: あく