1: /* 2: * File : mem.c 3: * This file is part of RT-Thread RTOS 4: * COPYRIGHT (C) 2008 - 2012, RT-Thread Development Team 5: * 6: * This program is free software; you can redistribute it and/or modify 7: * it under the terms of the GNU General Public License as published by 8: * the Free Software Foundation; either version 2 of the License, or 9: * (at your option) any later version. 10: * 11: * This program is distributed in the hope that it will be useful, 12: * but WITHOUT ANY WARRANTY; without even the implied warranty of 13: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14: * GNU General Public License for more details. 15: * 16: * You should have received a copy of the GNU General Public License along 17: * with this program; if not, write to the Free Software Foundation, Inc., 18: * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 19: * 20: * Change Logs: 21: * Date Author Notes 22: * 2008-7-12 Bernard the first version 23: * 2010-06-09 Bernard fix the end stub of heap 24: * fix memory check in rt_realloc function 25: * 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca 26: * 2010-10-14 Bernard fix rt_realloc issue when realloc a NULL pointer. 27: */ 28: 29: /* 30: * Copyright (c) 2001-2004 Swedish Institute of Computer Science. 31: * All rights reserved. 32: * 33: * Redistribution and use in source and binary forms, with or without modification, 34: * are permitted provided that the following conditions are met: 35: * 36: * 1. Redistributions of source code must retain the above copyright notice, 37: * this list of conditions and the following disclaimer. 38: * 2. Redistributions in binary form must reproduce the above copyright notice, 39: * this list of conditions and the following disclaimer in the documentation 40: * and/or other materials provided with the distribution. 41: * 3. 
The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include <rthw.h>
#include <rtthread.h>

/* This small-memory allocator is only built when the memheap object is NOT
 * used as the system heap. */
#ifndef RT_USING_MEMHEAP_AS_HEAP

/* #define RT_MEM_DEBUG */
#define RT_MEM_STATS

#if defined (RT_USING_HEAP) && defined (RT_USING_SMALL_MEM)
#ifdef RT_USING_HOOK
/* Optional user callbacks invoked on every successful malloc / free. */
static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
static void (*rt_free_hook)(void *ptr);

/**
 * @addtogroup Hook
 */

/*@{*/

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is allocated from heap memory.
 *
 * @param hook the hook function (called with the user pointer and the
 *             aligned allocation size)
 */
void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
{
    rt_malloc_hook = hook;
}

/**
 * This function will set a hook function, which will be invoked when a memory
 * block is released to heap memory.
 *
 * @param hook the hook function (called with the user pointer being freed)
 */
void rt_free_sethook(void (*hook)(void *ptr))
{
    rt_free_hook = hook;
}

/*@}*/

#endif

/* Magic value stamped into every block header; checked on free to catch
 * corruption / double free. */
#define HEAP_MAGIC 0x1ea0

/* Per-block header.  Blocks form a doubly linked list ordered by address;
 * links are stored as byte OFFSETS from heap_ptr, not as pointers. */
struct heap_mem
{
    /* magic and used flag */
    rt_uint16_t magic;
    rt_uint16_t used;

    /* offsets (from heap_ptr) of the next / previous block header */
    rt_size_t next, prev;
};

/** pointer to the heap: for alignment, heap_ptr is now a pointer instead of an array */
static rt_uint8_t *heap_ptr;

/** the last entry, always unused! (sentinel terminating the block list) */
static struct heap_mem *heap_end;

/* Minimum user-data size of any block, before alignment. */
#define MIN_SIZE 12
#define MIN_SIZE_ALIGNED     RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
#define SIZEOF_STRUCT_MEM    RT_ALIGN(sizeof(struct heap_mem), RT_ALIGN_SIZE)

static struct heap_mem *lfree;   /* pointer to the lowest free block; malloc scans start here */

/* Binary semaphore serializing all heap operations. */
static struct rt_semaphore heap_sem;
/* Usable heap size in bytes (excludes the two bookkeeping headers). */
static rt_size_t mem_size_aligned;

#ifdef RT_MEM_STATS
static rt_size_t used_mem, max_mem;   /* current / high-water usage, headers included */
#endif

/*
 * Coalesce the unused block 'mem' with its address-adjacent neighbours.
 *
 * Merges forward into mem->next and backward into mem->prev whenever the
 * neighbour is also unused (and not the heap_end sentinel), updating the
 * offset links and keeping 'lfree' pointing at the lowest free block.
 * Caller must hold heap_sem and mem->used must already be 0.
 */
static void plug_holes(struct heap_mem *mem)
{
    struct heap_mem *nmem;
    struct heap_mem *pmem;

    RT_ASSERT((rt_uint8_t *)mem >= heap_ptr);
    RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)heap_end);
    RT_ASSERT(mem->used == 0);

    /* plug hole forward */
    nmem = (struct heap_mem *)&heap_ptr[mem->next];
    if (mem != nmem &&
        nmem->used == 0 &&
        (rt_uint8_t *)nmem != (rt_uint8_t *)heap_end)
    {
        /* if mem->next is unused and not end of heap_ptr,
         * combine mem and mem->next
         */
        if (lfree == nmem)
        {
            lfree = mem;
        }
        mem->next = nmem->next;
        ((struct heap_mem *)&heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - heap_ptr;
    }

    /* plug hole backward */
    pmem = (struct heap_mem *)&heap_ptr[mem->prev];
    if (pmem != mem && pmem->used == 0)
    {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (lfree == mem)
        {
            lfree = pmem;
        }
        pmem->next = mem->next;
        ((struct heap_mem *)&heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - heap_ptr;
    }
}

/**
 * @ingroup SystemInit
 *
 * This function will init system heap.  The region is trimmed to
 * RT_ALIGN_SIZE on both ends and set up as one big free block followed by
 * the heap_end sentinel.
 *
 * NOTE(review): the (rt_uint32_t) casts on addresses assume 32-bit
 * pointers — TODO confirm before building for a 64-bit target.
 *
 * @param begin_addr the beginning address of system page
 * @param end_addr the end address of system page
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    struct heap_mem *mem;
    rt_uint32_t begin_align = RT_ALIGN((rt_uint32_t)begin_addr, RT_ALIGN_SIZE);
    rt_uint32_t end_align = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_ALIGN_SIZE);

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment addr: region must at least hold the two bookkeeping headers */
    if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
        ((end_align - 2 * SIZEOF_STRUCT_MEM) >= begin_align))
    {
        /* calculate the aligned memory size */
        mem_size_aligned = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
    }
    else
    {
        rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
                   (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);

        return;
    }

    /* point to begin address of heap */
    heap_ptr = (rt_uint8_t *)begin_align;

    RT_DEBUG_LOG(RT_DEBUG_MEM, ("mem init, heap begin address 0x%x, size %d\n",
                                (rt_uint32_t)heap_ptr, mem_size_aligned));

    /* initialize the start of the heap: one free block spanning everything */
    mem = (struct heap_mem *)heap_ptr;
    mem->magic = HEAP_MAGIC;
    mem->next = mem_size_aligned + SIZEOF_STRUCT_MEM;
    mem->prev = 0;
    mem->used = 0;

    /* initialize the end of the heap: permanently-used sentinel linking to itself */
    heap_end = (struct heap_mem *)&heap_ptr[mem->next];
    heap_end->magic = HEAP_MAGIC;
    heap_end->used = 1;
    heap_end->next = mem_size_aligned + SIZEOF_STRUCT_MEM;
    heap_end->prev = mem_size_aligned + SIZEOF_STRUCT_MEM;

    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

    /* initialize the lowest-free pointer to the start of the heap */
    lfree = (struct heap_mem *)heap_ptr;
}

/**
 * @addtogroup MM
 */

/*@{*/

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * First-fit scan starting at 'lfree'; an over-sized free block is split
 * when the remainder can hold a header plus MIN_SIZE_ALIGNED bytes.
 *
 * NOTE(review): the scan condition 'ptr < mem_size_aligned - size' appears
 * to reject an allocation exactly filling an empty heap — looks like it
 * should be '<=', TODO confirm against upstream.
 * NOTE(review): the split path does not stamp mem2->magic (rt_realloc's
 * split does) — harmless until free-time magic checks are tightened.
 *
 * @param size is the minimum size of the requested block in bytes.
 *
 * @return pointer to allocated memory or NULL if no free memory was found.
 */
void *rt_malloc(rt_size_t size)
{
    rt_size_t ptr, ptr2;
    struct heap_mem *mem, *mem2;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (size == 0)
        return RT_NULL;

    if (size != RT_ALIGN(size, RT_ALIGN_SIZE))
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d, but align to %d\n",
                                    size, RT_ALIGN(size, RT_ALIGN_SIZE)));
    else
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("malloc size %d\n", size));

    /* alignment size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);

    if (size > mem_size_aligned)
    {
        RT_DEBUG_LOG(RT_DEBUG_MEM, ("no memory\n"));

        return RT_NULL;
    }

    /* every data block must be at least MIN_SIZE_ALIGNED long */
    if (size < MIN_SIZE_ALIGNED)
        size = MIN_SIZE_ALIGNED;

    /* take memory semaphore */
    rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

    /* first-fit walk over the offset-linked block list, starting at the
     * lowest free block */
    for (ptr = (rt_uint8_t *)lfree - heap_ptr;
         ptr < mem_size_aligned - size;
         ptr = ((struct heap_mem *)&heap_ptr[ptr])->next)
    {
        mem = (struct heap_mem *)&heap_ptr[ptr];

        if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
        {
            /* mem is not used and at least perfect fit is possible:
             * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

            if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
                (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
            {
                /* (in addition to the above, we test if another struct heap_mem (SIZEOF_STRUCT_MEM) containing
                 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
                 * -> split large block, create empty remainder,
                 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
                 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
                 * struct heap_mem would fit in but no data between mem2 and mem2->next
                 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
                 *       region that couldn't hold data, but when mem->next gets freed,
                 *       the 2 regions would be combined, resulting in more free memory
                 */
                ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

                /* create mem2 struct for the free remainder */
                mem2 = (struct heap_mem *)&heap_ptr[ptr2];
                mem2->used = 0;
                mem2->next = mem->next;
                mem2->prev = ptr;

                /* and insert it between mem and mem->next */
                mem->next = ptr2;
                mem->used = 1;

                if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM)
                {
                    /* fix back-link of the block after the remainder
                     * (unless it is the heap_end sentinel) */
                    ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2;
                }
#ifdef RT_MEM_STATS
                used_mem += (size + SIZEOF_STRUCT_MEM);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            else
            {
                /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
                 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
                 * take care of this).
                 * -> near fit or excact fit: do not split, no mem2 creation
                 * also can't move mem->next directly behind mem, since mem->next
                 * will always be used at this point!
                 */
                mem->used = 1;
#ifdef RT_MEM_STATS
                used_mem += mem->next - ((rt_uint8_t*)mem - heap_ptr);
                if (max_mem < used_mem)
                    max_mem = used_mem;
#endif
            }
            /* set memory block magic */
            mem->magic = HEAP_MAGIC;

            if (mem == lfree)
            {
                /* Find next free block after mem and update lowest free pointer */
                while (lfree->used && lfree != heap_end)
                    lfree = (struct heap_mem *)&heap_ptr[lfree->next];

                RT_ASSERT(((lfree == heap_end) || (!lfree->used)));
            }

            rt_sem_release(&heap_sem);
            RT_ASSERT((rt_uint32_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_uint32_t)heap_end);
            RT_ASSERT((rt_uint32_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
            RT_ASSERT((((rt_uint32_t)mem) & (RT_ALIGN_SIZE-1)) == 0);

            RT_DEBUG_LOG(RT_DEBUG_MEM,
                         ("allocate memory at 0x%x, size: %d\n",
                          (rt_uint32_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM),
                          (rt_uint32_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr))));

            RT_OBJECT_HOOK_CALL(rt_malloc_hook,
                                (((void *)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM)), size));

            /* return the memory data except mem struct */
            return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
        }
    }

    rt_sem_release(&heap_sem);

    return RT_NULL;
}
RTM_EXPORT(rt_malloc);

/**
 * This function will change the previously allocated memory block.
379: * 380: * @param rmem pointer to memory allocated by rt_malloc 381: * @param newsize the required new size 382: * 383: * @return the changed memory block address 384: */ 385: void *rt_realloc(void *rmem, rt_size_t newsize) 386: { 387: rt_size_t size; 388: rt_size_t ptr, ptr2; 389: struct heap_mem *mem, *mem2; 390: void *nmem; 391: 392: RT_DEBUG_NOT_IN_INTERRUPT; 393: 394: /* alignment size */ 395: newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE); 396: if (newsize > mem_size_aligned) 397: { 398: RT_DEBUG_LOG(RT_DEBUG_MEM, ("realloc: out of memory\n")); 399: 400: return RT_NULL; 401: } 402: 403: /* allocate a new memory block */ 404: if (rmem == RT_NULL) 405: return rt_malloc(newsize); 406: 407: rt_sem_take(&heap_sem, RT_WAITING_FOREVER); 408: 409: if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr || 410: (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end) 411: { 412: /* illegal memory */ 413: rt_sem_release(&heap_sem); 414: 415: return rmem; 416: } 417: 418: mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM); 419: 420: ptr = (rt_uint8_t *)mem - heap_ptr; 421: size = mem->next - ptr - SIZEOF_STRUCT_MEM; 422: if (size == newsize) 423: { 424: /* the size is the same as */ 425: rt_sem_release(&heap_sem); 426: 427: return rmem; 428: } 429: 430: if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size) 431: { 432: /* split memory block */ 433: #ifdef RT_MEM_STATS 434: used_mem -= (size - newsize); 435: #endif 436: 437: ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; 438: mem2 = (struct heap_mem *)&heap_ptr[ptr2]; 439: mem2->magic= HEAP_MAGIC; 440: mem2->used = 0; 441: mem2->next = mem->next; 442: mem2->prev = ptr; 443: mem->next = ptr2; 444: if (mem2->next != mem_size_aligned + SIZEOF_STRUCT_MEM) 445: { 446: ((struct heap_mem *)&heap_ptr[mem2->next])->prev = ptr2; 447: } 448: 449: plug_holes(mem2); 450: 451: rt_sem_release(&heap_sem); 452: 453: return rmem; 454: } « end if newsize+SIZEOF_STRUCT... 
» 455: rt_sem_release(&heap_sem); 456: 457: /* expand memory */ 458: nmem = rt_malloc(newsize); 459: if (nmem != RT_NULL) /* check memory */ 460: { 461: rt_memcpy(nmem, rmem, size < newsize ? size : newsize); 462: rt_free(rmem); 463: } 464: 465: return nmem; 466: } « end rt_realloc » 467: RTM_EXPORT(rt_realloc); 468: 469: /** 470: * This function will contiguously allocate enough space for count objects 471: * that are size bytes of memory each and returns a pointer to the allocated 472: * memory. 473: * 474: * The allocated memory is filled with bytes of value zero. 475: * 476: * @param count number of objects to allocate 477: * @param size size of the objects to allocate 478: * 479: * @return pointer to allocated memory / NULL pointer if there is an error 480: */ 481: void *rt_calloc(rt_size_t count, rt_size_t size) 482: { 483: void *p; 484: 485: RT_DEBUG_NOT_IN_INTERRUPT; 486: 487: /* allocate 'count' objects of size 'size' */ 488: p = rt_malloc(count * size); 489: 490: /* zero the memory */ 491: if (p) 492: rt_memset(p, 0, count * size); 493: 494: return p; 495: } 496: RTM_EXPORT(rt_calloc); 497: 498: /** 499: * This function will release the previously allocated memory block by 500: * rt_malloc. The released memory block is taken back to system heap. 501: * 502: * @param rmem the address of memory which will be released 503: */ 504: void rt_free(void *rmem) 505: { 506: struct heap_mem *mem; 507: 508: RT_DEBUG_NOT_IN_INTERRUPT; 509: 510: if (rmem == RT_NULL) 511: return; 512: RT_ASSERT((((rt_uint32_t)rmem) & (RT_ALIGN_SIZE-1)) == 0); 513: RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)heap_ptr && 514: (rt_uint8_t *)rmem < (rt_uint8_t *)heap_end); 515: 516: RT_OBJECT_HOOK_CALL(rt_free_hook, (rmem)); 517: 518: if ((rt_uint8_t *)rmem < (rt_uint8_t *)heap_ptr || 519: (rt_uint8_t *)rmem >= (rt_uint8_t *)heap_end) 520: { 521: RT_DEBUG_LOG(RT_DEBUG_MEM, ("illegal memory\n")); 522: 523: return; 524: } 525: 526: /* Get the corresponding struct heap_mem ... 
*/ 527: mem = (struct heap_mem *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM); 528: 529: RT_DEBUG_LOG(RT_DEBUG_MEM, 530: ("release memory 0x%x, size: %d\n", 531: (rt_uint32_t)rmem, 532: (rt_uint32_t)(mem->next - ((rt_uint8_t *)mem - heap_ptr)))); 533: 534: 535: /* protect the heap from concurrent access */ 536: rt_sem_take(&heap_sem, RT_WAITING_FOREVER); 537: 538: /* ... which has to be in a used state ... */ 539: RT_ASSERT(mem->used); 540: RT_ASSERT(mem->magic == HEAP_MAGIC); 541: /* ... and is now unused. */ 542: mem->used = 0; 543: mem->magic = 0; 544: 545: if (mem < lfree) 546: { 547: /* the newly freed struct is now the lowest */ 548: lfree = mem; 549: } 550: 551: #ifdef RT_MEM_STATS 552: used_mem -= (mem->next - ((rt_uint8_t*)mem - heap_ptr)); 553: #endif 554: 555: /* finally, see if prev or next are free also */ 556: plug_holes(mem); 557: rt_sem_release(&heap_sem); 558: } « end rt_free » 559: RTM_EXPORT(rt_free); 560: 561: #ifdef RT_MEM_STATS 562: void rt_memory_info(rt_uint32_t *total, 563: rt_uint32_t *used, 564: rt_uint32_t *max_used) 565: { 566: if (total != RT_NULL) 567: *total = mem_size_aligned; 568: if (used != RT_NULL) 569: *used = used_mem; 570: if (max_used != RT_NULL) 571: *max_used = max_mem; 572: } 573: 574: #ifdef RT_USING_FINSH 575: #include <finsh.h> 576: 577: void list_mem(void) 578: { 579: rt_kprintf("total memory: %d\n", mem_size_aligned); 580: rt_kprintf("used memory : %d\n", used_mem); 581: rt_kprintf("maximum allocated memory: %d\n", max_mem); 582: } 583: FINSH_FUNCTION_EXPORT(list_mem, list memory usage information) 584: #endif 585: #endif 586: 587: /*@}*/ 588: 589: #endif /* end of RT_USING_HEAP */ 590: #endif /* end of RT_USING_MEMHEAP_AS_HEAP */ 591:
/* Provenance note: RT-Thread mem.c — reposted copy, article last published 2023-01-13 14:17:58. */