内存池——第一章 几种常用的内存池技术

几乎所有应用程序中都会有内存的分配和释放,而频繁的分配和释放内存无疑会产生内存碎片,降低系统性能,尤其对性能要求较高的程序比较明显。下面介绍几种常见的内存池技术。

 

    一  环形缓存

 

    环形缓存的基本原理如图:

    初始化状态(wpos_ = rpos_):

    写了部分数据,同时读了一部分数据(wpos_ > rpos_):

    wpos_写数据到尾部后,又从头开始,rpos_还读到尾部(wpos_ < rpos_):

    rpos_读了N(N>= 1)圈后,赶上了wpos_,也就是说没有数据可读了(wpos_ = rpos_):

   综合起来,看起来像这样子:

 需要注意的是:

    #1    wpos_ < rpos_的情况下,rpos_ 读数据一直读到尾部,然后又从头部开始,数据拼接一块即可;

    #2    如果 | wpos_ - rpos_ | < cnt,即没有足够的空间可写的时候,需要重新分配内存,具体分配多少,根据你的程序来定,额外大小或者1.5倍原大小;

    部分实现代码如下:

点击(此处)折叠或打开

  1. #define EXTRA_BUFFER_SIZE        64
  2.  
  3. namespace easy
  4. {
  5.     template<class _Type,class _Alloc >
  6.     class EasyRingbuffer 
  7.     {
  8.     public:
  9.         typedef _Alloc allocator_type;
  10.  
  11.         explicit EasyRingbuffer(size_t size):
  12.         size_(size),
  13.             wpos_(0),
  14.             rpos_(0)
  15.         {
  16.             buffer_ = _allocate(size_);
  17.         }
  18.  
  19.         ~EasyRingbuffer() { _deallocate(buffer_,size_); }
  20.  
  21.         template<typename T> void append(T val) 
  22.         { 
  23.             append((easy_uint8*)&val,sizeof(val));
  24.         }
  25.  
  26.         void append(const easy_uint8* src, size_t cnt)
  27.         {
  28.             if (!cnt)
  29.             {
  30.                 return;
  31.             }
  32.  
  33.             //    case 1: rpos_ <= wpos_
  34.             if (rpos_ <= wpos_)
  35.             {
  36.                 if (size_ - wpos_ >= cnt)
  37.                 {
  38.                     memmove(buffer_ + wpos_,src,cnt);
  39.                     wpos_ += cnt;
  40.                     return;
  41.                 }
  42.                 else
  43.                 {
  44.                     if (size_ - wpos_ + rpos_ > cnt)    // >= is ok>
  45.                     {
  46.                         memmove(buffer_ + wpos_, src, size_ - wpos_);
  47.                         memmove(buffer_, src + size_ - wpos_, cnt - (size_ - wpos_));
  48.                         wpos_ = cnt - (size_ - wpos_);
  49.                         return;
  50.                     }
  51.                     else
  52.                     {
  53.                         _Type* new_buffer = _allocate(size_ + cnt - (size_ - wpos_));
  54.                         memmove(new_buffer,buffer_,wpos_);
  55.                         memmove(new_buffer + wpos_, src, cnt);
  56.                         _deallocate(buffer_,size_);
  57.                         size_ = size_ + cnt - (size_ - wpos_);
  58.                         wpos_ += cnt;
  59.                         buffer_ = new_buffer;
  60.                         return;
  61.                     }
  62.                 }
  63.             }
  64.             //    case 2: rpos_ > wpos_ 
  65.             else if(rpos_ > wpos_)
  66.             {
  67.                 if (rpos_ - wpos_ > cnt)    // >= is ok ?
  68.                 {
  69.                     if (rpos_ - wpos_ > cnt)
  70.                     {
  71.                         memmove(buffer_ + wpos_,src,cnt);
  72.                         wpos_ += cnt;
  73.                         return;
  74.                     }
  75.                     else
  76.                     {
  77.                         _Type* new_buffer = _allocate(size_ + cnt - (rpos_ - wpos_) + EXTRA_BUFFER_SIZE);
  78.                         memmove(new_buffer,buffer_,wpos_);
  79.                         memmove(new_buffer + wpos_,src,cnt);
  80.                         memmove(new_buffer + wpos_ + cnt - (rpos_ - wpos_) + EXTRA_BUFFER_SIZE,buffer_ + rpos_,size_ - rpos_);
  81.                         _deallocate(buffer_,size_);
  82.                         rpos_ += cnt - (rpos_ - wpos_) + EXTRA_BUFFER_SIZE;
  83.                         wpos_ += cnt;
  84.                         size_ = size_ + cnt - (rpos_ - wpos_) + EXTRA_BUFFER_SIZE;
  85.                         buffer_ = new_buffer;
  86.                         return;
  87.                     }
  88.                 }
  89.             }
  90.         }
  91.  
  92.         EasyRingbuffer& operator << (easy_bool val)
  93.         {
  94.             append<easy_bool>(val);
  95.             return *this;
  96.         }
  97.  
  98.         EasyRingbuffer& operator << (easy_uint8 val)
  99.         {
  100.             append<easy_uint8>(val);
  101.             return *this;
  102.         }
  103.  
  104.         EasyRingbuffer& operator << (easy_uint16 val)
  105.         {
  106.             append<easy_uint16>(val);
  107.             return *this;
  108.         }
  109.  
  110.         EasyRingbuffer& operator << (easy_uint32 val)
  111.         {
  112.             append<easy_uint32>(val);
  113.             return *this;
  114.         }
  115.  
  116.         EasyRingbuffer& operator << (easy_uint64 val)
  117.         {
  118.             append<easy_uint64>(val);
  119.             return *this;
  120.         }
  121.  
  122.         EasyRingbuffer& operator << (easy_int8 val)
  123.         {
  124.             append<easy_int8>(val);
  125.             return *this;
  126.         }
  127.  
  128.         EasyRingbuffer& operator << (easy_int16 val)
  129.         {
  130.             append<easy_int16>(val);
  131.             return *this;
  132.         }
  133.  
  134.         EasyRingbuffer& operator << (easy_int32 val)
  135.         {
  136.             append<easy_int32>(val);
  137.             return *this;
  138.         }
  139.  
  140.         EasyRingbuffer& operator << (easy_int64 val)
  141.         {
  142.             append<easy_int64>(val);
  143.             return *this;
  144.         }
  145.  
  146.         EasyRingbuffer& operator << (easy_float val)
  147.         {
  148.             append<easy_float>(val);
  149.             return *this;
  150.         }
  151.  
  152.         EasyRingbuffer& operator << (easy_double val)
  153.         {
  154.             append<easy_double>(val);
  155.             return *this;
  156.         }
  157.  
  158.         EasyRingbuffer& operator << (const std::string& val)
  159.         {
  160.             append((easy_uint8 const*)val.c_str(),val.length());
  161.             return *this;
  162.         }
  163.  
  164.         EasyRingbuffer& operator << (const char* val)
  165.         {
  166.             append((easy_uint8 const *)val, val ? strlen(val) : 0);
  167.             return *this;
  168.         }
  169.  
  170.         template<typename T> T read() 
  171.         {
  172.             T r;
  173.             read((easy_uint8*)&r,sizeof(T));
  174.             return r;
  175.         }
  176.  
  177.         void read(easy_uint8* des,size_t len)
  178.         {
  179.             if (_read_finish())
  180.             {
  181.                 return;
  182.             }
  183.             if (rpos_ < wpos_)
  184.             {
  185.                 if (wpos_ - rpos_ >= len)
  186.                 {
  187.                     memmove(des,buffer_ + rpos_,len);
  188.                     rpos_ += len;
  189.                 }
  190.                 //    else just skip
  191.             }
  192.             else if (rpos_ > wpos_)
  193.             {
  194.                 if (size_ - rpos_ >= len)
  195.                 {
  196.                     memmove(des,buffer_ + rpos_,len);
  197.                     rpos_ += len;
  198.                 }
  199.                 else
  200.                 {
  201.                     memmove(des,buffer_ + rpos_, size_ - rpos_);
  202.                     memmove(des + size_ - rpos_, buffer_, len - (size_ - rpos_));
  203.                     rpos_ = len - (size_ - rpos_);
  204.                 }
  205.             }
  206.         }
  207.  
  208.         EasyRingbuffer& operator >> (easy_bool& val)
  209.         {
  210.             val = read<easy_bool>();
  211.             return *this;
  212.         }
  213.  
  214.         EasyRingbuffer& operator >> (easy_uint8& val)
  215.         {
  216.             val = read<easy_uint8>();
  217.             return *this;
  218.         }
  219.  
  220.         EasyRingbuffer& operator >> (easy_uint16& val)
  221.         {
  222.             val = read<easy_uint16>();
  223.             return *this;
  224.         }
  225.  
  226.         EasyRingbuffer& operator >> (easy_uint32& val)
  227.         {
  228.             val = read<easy_uint32>();
  229.             return *this;
  230.         }
  231.  
  232.         EasyRingbuffer& operator >> (easy_uint64& val)
  233.         {
  234.             val = read<easy_uint64>();
  235.             return *this;
  236.         }
  237.  
  238.         EasyRingbuffer& operator >> (easy_int8& val)
  239.         {
  240.             val = read<easy_int8>();
  241.             return *this;
  242.         }
  243.  
  244.         EasyRingbuffer& operator >> (easy_int16& val)
  245.         {
  246.             val = read<easy_int16>();
  247.             return *this;
  248.         }
  249.  
  250.         EasyRingbuffer& operator >> (easy_int32& val)
  251.         {
  252.             val = read<easy_int32>();
  253.             return *this;
  254.         }
  255.  
  256.         EasyRingbuffer& operator >> (easy_int64& val)
  257.         {
  258.             val = read<easy_int64>();
  259.             return *this;
  260.         }
  261.  
  262.         EasyRingbuffer& operator >> (easy_float& val)
  263.         {
  264.             val = read<easy_float>();
  265.             return *this;
  266.         }
  267.  
  268.         EasyRingbuffer& operator >> (easy_double& val)
  269.         {
  270.             val = read<easy_double>();
  271.             return *this;
  272.         }
  273.  
  274.         size_t size() const { return size_; }
  275.  
  276.         size_t rpos() const { return rpos_; }
  277.  
  278.         size_t wpos() const { return wpos_; }
  279.  
  280.     private:
  281.         _Type* _allocate(size_t size) 
  282.         { 
  283.             _Type* res = 0;
  284.             res = static_cast<_Type*>(alloc_type_.allocate(size)); 
  285.             return res;
  286.         }
  287.  
  288.         void _deallocate(void* p,size_t size) 
  289.         { 
  290.             alloc_type_.deallocate(p,size); 
  291.         }
  292.  
  293.         void _reallocate(void* p,size_t old_size,size_t new_size) { alloc_type_.reallocate(p,old_size,new_size); }
  294.  
  295.         easy_bool _read_finish() { return wpos_ == rpos_; }
  296.  
  297.     private:
  298.         EasyRingbuffer ( const EasyRingbuffer& );
  299.         EasyRingbuffer& operator = ( const EasyRingbuffer& );
  300.     private:
  301.         size_t            size_;
  302.  
  303.         _Type*            buffer_;
  304.  
  305.         size_t            wpos_;
  306.  
  307.         size_t            rpos_;
  308.  
  309.         allocator_type    alloc_type_;
  310.     };
  311. }

 

  二 空闲列表

 

    空闲列表的原理比较简单,一般用于比较大的对象,可预分配一定数量的对象,需要时直接空闲列表中取,使用完后收回,如果空闲列表中已空,则需要重新设置大小了;也可使用时分配,使用完后收回。实现代码如下:

点击(此处)折叠或打开

  1. // use stl
  2.     template<typename _Type, typename _Lock,typename _StorageType /*= std::list<_Type*>*/>
  3.     class lock_queue     
  4.     {
  5.         typedef typename _Type::_Key                _Key;
  6.  
  7.         static const size_t MAX_POOL_SIZE = _Type::MAX_POOL_SIZE;
  8.  
  9.     public:
  10.         _Type* allocate(_Key __key)
  11.         {
  12.             _Type* __ret = 0;
  13.             if (free_list_.empty())
  14.             {
  15.                 __ret = new _Type(__key);
  16.             }
  17.             else
  18.             {
  19.                 lock_.acquire_lock();
  20.                 __ret = free_list_.back();
  21.                 free_list_.pop_back();
  22.                 lock_.release_lock();
  23.             }
  24.             return __ret;
  25.         }
  26.  
  27.         void deallcate(_Type* __val)
  28.         {
  29.             if (!__val)
  30.             {
  31.                 return;
  32.             }
  33.             if (MAX_POOL_SIZE < free_list_.size())
  34.             {
  35.                 delete __val;
  36.                 return;
  37.             }
  38.             lock_.acquire_lock();
  39.             free_list_.push_back(__val);
  40.             lock_.release_lock();
  41.         }
  42.  
  43.         size_t free_size() /*const*/ 
  44.         { 
  45.             size_t __size = 0;
  46.             lock_.acquire_lock(); 
  47.             __size = free_list_.size(); 
  48.             lock_.release_lock();
  49.             return __size;
  50.         }
  51.  
  52.         void clear() 
  53.         { 
  54.             lock_.acquire_lock(); 
  55.             for (typename _StorageType::iterator __it = free_list_.begin(); __it != free_list_.end(); ++__it)
  56.             {
  57.                 if ((*__it))
  58.                 {
  59.                     delete (*__it);
  60.                     (*__it) = NULL;
  61.                 }
  62.             }
  63.             free_list_.clear(); 
  64.             _StorageType().swap(free_list_); 
  65.             lock_.release_lock();
  66.         }
  67.  
  68.         ~lock_queue()
  69.         {
  70.             clear();
  71.         }
  72.     private:
  73.         _Lock                        lock_;
  74.         _StorageType                free_list_;
  75.     };

 

点击(此处)折叠或打开

  1. //anther way,use use stl
  2. template < typename T, int DEFAULT_BLOCK_NUM = 1024 >
  3. class CMemoryPool 
  4. {
  5. public:
  6.     static VOID* operator new ( std::size_t nAllocLength )
  7.     {
  8.         Assert( sizeof(T) == nAllocLength ); 
  9.         Assert( sizeof(T) >= sizeof(UCHAR*) );
  10.         if ( !m_sNewPointer )
  11.         {
  12.             allocBlock();
  13.         }
  14.         UCHAR* ucReturnPointer = m_sNewPointer;
  15.         //the head of 4 bytes is explain the next pointer of memory force,
  16.         //and m_NewPointer just point the next block of memory,when used the next allocation
  17.         m_sNewPointer = *reinterpret_cast<UCHAR**>( ucReturnPointer);
  18.         return ucReturnPointer;
  19.     }
  20.  
  21.     static VOID operator delete( void* vpDeletePointer )
  22.     {
  23.         *reinterpret_cast<UCHAR**>( vpDeletePointer) = m_sNewPointer;    
  24.         m_sNewPointer = static_cast<UCHAR*>(vpDeletePointer);
  25.     }
  26.  
  27.     static VOID allocBlock()
  28.     {
  29.         m_sNewPointer = new UCHAR[sizeof(T) * DEFAULT_BLOCK_NUM]; 
  30.         //casting dual pointer force,that will change the meaning of the head of 4 byte memory 
  31.         UCHAR **ppCurent = reinterpret_cast<UCHAR**>( m_sNewPointer ); 
  32.         UCHAR *ppNext = m_sNewPointer; 
  33.         for( int i = 0; i < DEFAULT_BLOCK_NUM-1; i++ ) 
  34.         {
  35.             ppNext += sizeof(T); 
  36.             *ppCurent = ppNext; 
  37.             //the head of 4 bytes is explain the next pointer of memory force,a memory list in form. 
  38.             ppCurent = reinterpret_cast<UCHAR**>( ppNext ); 
  39.         }
  40.         //if the last memory bock, the head of 4 byte is null
  41.         *ppCurent = 0; 
  42.     }
  43.  
  44. protected:
  45.     virtual ~CMemoryPool()
  46.     {
  47.  
  48.     }
  49. private:
  50.     static UCHAR *m_sNewPointer; 
  51. };
  52.  
  53. template<class T, int BLOCK_NUM > 
  54. UCHAR *CMemoryPool<T, BLOCK_NUM >::m_sNewPointer;

 

    三  stl的二级分配器

 

    stl内部实现的分配器分两种情况:一种是大于128byte的分配,直接使用系统的内存分配函数malloc/free;另外一种为小于128byte的,也就是上面说的二级分配器,它采用了某些技术来管理内存,避免频繁分配释放。简单的说,就是将内存按8字节对齐,分别建立固定值倍数大小的内存池,如8, 8*2 ,8*3..., 当需要分配内存时,根据分配内存的大小,算出所需内存大小的内存池索引,然后根据这个索引找到那块内存池,并从中取出一块返回;同样,内存使用完后,按类似的方法回收。这种方案一般适用于比较小的内存分配的情况,大的可以考虑其他的方案。其流程如下:


下面是具体代码:

点击(此处)折叠或打开

  1. template< bool threads, int inst >
  2.     class __default_alloc_template
  3.     {
  4.         enum {_ALIGN = 8};
  5.         enum {_MAX_BYTES = 128};
  6.         enum {_NFREELISTS = 16}; // _MAX_BYTES/_ALIGN
  7.     
  8.         static size_t _S_round_up(size_t __bytes) { return (((__bytes) + (size_t) _ALIGN-1) & ~((size_t) _ALIGN - 1)); }
  9.  
  10.         static size_t _S_freelist_index(size_t __bytes) { return (((__bytes) + (size_t)_ALIGN-1)/(size_t)_ALIGN - 1); }
  11.  
  12.         union _Obj 
  13.         {
  14.             union _Obj* _M_free_list_link;
  15.             char _M_client_data[1]; /* The client sees this. */
  16.         };
  17.         static _Obj* volatile _S_free_list[_NFREELISTS]; 
  18.  
  19.         // Returns an object of size __n, and optionally adds to size __n free list.
  20.         static void* _S_refill(size_t __n);
  21.  
  22.         // Allocates a chunk for nobjs of size size. nobjs may be reduced
  23.         // if it is inconvenient to allocate the requested number.
  24.         static char* _S_chunk_alloc(size_t __size, int& __nobjs);
  25.  
  26.         static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz);
  27.  
  28.         // Chunk allocation state.
  29.         static char*    _S_start_free;
  30.         static char*    _S_end_free;
  31.         static size_t    _S_heap_size;
  32.  
  33.     public:
  34.         static void* allocate(size_t __n)
  35.         {
  36.             void* __ret = 0;
  37.             if (__n > (size_t) _MAX_BYTES) 
  38.             {
  39.                 __ret = malloc_alloc::allocate(__n);
  40.             }
  41.             else
  42.             {
  43.                 mutex_lock    __lock;
  44.                 __lock.acquire_lock();
  45.                 _Obj* volatile* __my_free_list = _S_free_list + _S_freelist_index(__n);
  46.                 _Obj* volatile __result = *__my_free_list;
  47.                 if (__result == 0)
  48.                 {
  49.                     __ret = _S_refill(_S_round_up(__n));
  50.                 }
  51.                 else 
  52.                 {
  53.                     *__my_free_list = __result -> _M_free_list_link;
  54.                     __ret = __result;
  55.                 }
  56.                 __lock.release_lock();
  57.             }
  58.             return __ret;
  59.         }
  60.  
  61.         /* __p may not be 0 */
  62.         static void deallocate(void* __p, size_t __n)
  63.         {
  64.             if (__n > (size_t) _MAX_BYTES)
  65.             {
  66.                  malloc_alloc::deallocate(__p, __n);
  67.             }
  68.             else 
  69.             {
  70.                 mutex_lock    __lock;
  71.                 __lock.acquire_lock();
  72.                  _Obj* volatile* __my_free_list = _S_free_list + _S_freelist_index(__n);
  73.                  _Obj* __q = (_Obj*)__p;
  74.                  __q -> _M_free_list_link = *__my_free_list;
  75.                  *__my_free_list = __q;
  76.                  __lock.release_lock();
  77.             }
  78.         }
  79.     };
  80.  
  81.     template <bool __threads, int __inst>
  82.     inline bool operator==(const __default_alloc_template<__threads, __inst>&,
  83.         const __default_alloc_template<__threads, __inst>&)
  84.     {
  85.         return true;
  86.     }
  87.  
  88.     template <bool __threads, int __inst>
  89.     inline bool operator!=(const __default_alloc_template<__threads, __inst>&,
  90.         const __default_alloc_template<__threads, __inst>&)
  91.     {
  92.         return false;
  93.     }
  94.  
  95.     /* We allocate memory in large chunks in order to avoid fragmenting */
  96.     /* the malloc heap too much. */
  97.     /* We assume that size is properly aligned. */
  98.     /* We hold the allocation lock. */
  99.     template <bool __threads, int __inst>
  100.     char*    __default_alloc_template<__threads, __inst>::_S_chunk_alloc(size_t __size, int& __nobjs)
  101.     {
  102.         //::_set_new_handler(_out_of_memory);
  103.         char* __result;
  104.         size_t __total_bytes = __size * __nobjs;
  105.         size_t __bytes_left = _S_end_free - _S_start_free;
  106.         //    enough memory to alloc
  107.         if (__bytes_left >= __total_bytes) 
  108.         {
  109.             __result = _S_start_free;
  110.             _S_start_free += __total_bytes;
  111.             return(__result);
  112.         } 
  113.         //    only more than __size can be alloc
  114.         else if (__bytes_left >= __size) 
  115.         {
  116.             __nobjs = (int)(__bytes_left/__size);
  117.             __total_bytes = __size * __nobjs;
  118.             __result = _S_start_free;
  119.             _S_start_free += __total_bytes;
  120.             return(__result);
  121.         } 
  122.         else 
  123.         {
  124.             size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
  125.             // Try to make use of the left-over piece.
  126.             if (__bytes_left > 0) 
  127.             {
  128.                 _Obj* volatile* __my_free_list = _S_free_list + _S_freelist_index(__bytes_left);
  129.                 ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
  130.                 *__my_free_list = (_Obj*)_S_start_free;
  131.             }
  132.             //    alloc __bytes_to_get again
  133.             _S_start_free = (char*)malloc(__bytes_to_get);
  134.  
  135.             //    alloc failed
  136.             if (0 == _S_start_free) 
  137.             {
  138.                 size_t __i;
  139.                 _Obj* volatile* __my_free_list;
  140.                 _Obj* __p;
  141.                 // Try to make do with what we have. That can't
  142.                 // hurt. We do not try smaller requests, since that tends
  143.                 // to result in disaster on multi-process machines.
  144.                 for (__i = __size; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN) 
  145.                 {
  146.                     __my_free_list = _S_free_list + _S_freelist_index(__i);
  147.                     __p = *__my_free_list;
  148.                     if (0 != __p) 
  149.                     {
  150.                         *__my_free_list = __p -> _M_free_list_link;
  151.                         _S_start_free = (char*)__p;
  152.                         _S_end_free = _S_start_free + __i;
  153.                         return(_S_chunk_alloc(__size, __nobjs));
  154.                         // Any leftover piece will eventually make it to the
  155.                         // right free list.
  156.                     }
  157.                 }
  158.                 _S_end_free = 0;    // In case of exception.
  159.                 _S_start_free = (char*) malloc(__bytes_to_get);
  160.                 // This should either throw an
  161.                 // exception or remedy the situation. Thus we assume it
  162.                 // succeeded.
  163.             }
  164.             _S_heap_size += __bytes_to_get;
  165.             _S_end_free = _S_start_free + __bytes_to_get;
  166.             return(_S_chunk_alloc(__size, __nobjs));
  167.         }
  168.     }
  169.  
  170.     /* Returns an object of size __n, and optionally adds to size __n free list.*/
  171.     /* We assume that __n is properly aligned. */
  172.     /* We hold the allocation lock. */
  173.     template <bool __threads, int __inst>
  174.     void* __default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
  175.     {
  176.         int __nobjs = 20;
  177.         char* __chunk = _S_chunk_alloc(__n, __nobjs);
  178.         _Obj* volatile* __my_free_list;
  179.         _Obj* __result;
  180.         _Obj* __current_obj;
  181.         _Obj* __next_obj;
  182.         int __i;
  183.  
  184.         if (1 == __nobjs) 
  185.         {
  186.             return(__chunk);
  187.         }
  188.         __my_free_list = _S_free_list + _S_freelist_index(__n);
  189.  
  190.         /* Build free list in chunk */
  191.         __result = (_Obj*)__chunk;
  192.         *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
  193.         for (__i = 1; ; __i++) 
  194.         {
  195.             __current_obj = __next_obj;
  196.             __next_obj = (_Obj*)((char*)__next_obj + __n);
  197.             if (__nobjs - 1 == __i) 
  198.             {
  199.                 __current_obj -> _M_free_list_link = 0;
  200.                 break;
  201.             } 
  202.             else 
  203.             {
  204.                 __current_obj -> _M_free_list_link = __next_obj;
  205.             }
  206.         }
  207.         return(__result);
  208.     }
  209.  
  210.     template <bool threads, int inst>
  211.     void* __default_alloc_template<threads, inst>::reallocate(void* __p, size_t __old_sz, size_t __new_sz)
  212.     {
  213.         mutex_lock    __lock;
  214.         __lock.acquire_lock();
  215.         void* __result;
  216.         size_t __copy_sz;
  217.  
  218.         if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES) 
  219.         {
  220.             __lock.release_lock();
  221.             return(realloc(__p, __new_sz));
  222.         }
  223.         if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) 
  224.         {
  225.             __lock.release_lock();
  226.             return(__p);
  227.         }
  228.         __result = allocate(__new_sz);
  229.         __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
  230.         memcpy(__result, __p, __copy_sz);
  231.         deallocate(__p, __old_sz);
  232.         __lock.release_lock();
  233.         return(__result);
  234.     }
  235.  
  236.     template< bool threads, int inst >
  237.     char* __default_alloc_template<threads, inst>::_S_start_free = 0;
  238.  
  239.     template< bool threads, int inst >
  240.     char* __default_alloc_template<threads, inst>::_S_end_free = 0;
  241.  
  242.     template< bool threads, int inst >
  243.     size_t __default_alloc_template<threads, inst>::_S_heap_size = 0;
  244.  
  245.     template <bool __threads, int __inst>
  246.     typename __default_alloc_template<__threads, __inst>::_Obj* volatile
  247.         __default_alloc_template<__threads, __inst> ::_S_free_list[_NFREELISTS] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };


 参考:
    sgi stl
     http://www.sgi.com/tech/stl/

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值