1 //==========================================================================
5 // Port of Doug Lea's malloc implementation
7 //==========================================================================
8 //####ECOSGPLCOPYRIGHTBEGIN####
9 // -------------------------------------------
10 // This file is part of eCos, the Embedded Configurable Operating System.
11 // Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
13 // eCos is free software; you can redistribute it and/or modify it under
14 // the terms of the GNU General Public License as published by the Free
15 // Software Foundation; either version 2 or (at your option) any later version.
17 // eCos is distributed in the hope that it will be useful, but WITHOUT ANY
18 // WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 // You should have received a copy of the GNU General Public License along
23 // with eCos; if not, write to the Free Software Foundation, Inc.,
24 // 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 // As a special exception, if other files instantiate templates or use macros
27 // or inline functions from this file, or you compile this file and link it
28 // with other works to produce a work based on this file, this file does not
29 // by itself cause the resulting work to be covered by the GNU General Public
30 // License. However the source code for this file must still be made available
31 // in accordance with section (3) of the GNU General Public License.
33 // This exception does not invalidate any other reasons why a work based on
34 // this file might be covered by the GNU General Public License.
36 // Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
37 // at http://sources.redhat.com/ecos/ecos-license/
38 // -------------------------------------------
39 //####ECOSGPLCOPYRIGHTEND####
40 //==========================================================================
41 //#####DESCRIPTIONBEGIN####
43 // Author(s): Doug Lea (dl at g.oswego.edu), jlarmour
46 // Purpose: Doug Lea's malloc implementation
47 // Description: Doug Lea's malloc has been ported to eCos. This file
48 // provides the implementation in a way acceptable to eCos.
49 // Substantial amounts of unnecessary bits (to eCos) of the
50 // original implementation have been removed to make the
51 // code more tractable. Note this may make a number of the
52 // comments appear to make little sense, or no longer apply!
53 // In particular, mmap support is removed entirely.
54 // Also the memory is "sbrked" all at once at the
55 // beginning, covering the entire memory region given at
56 // construction, and there can be no more afterwards.
57 // Usage: #include <cyg/memalloc/dlmalloc.hxx>
60 //####DESCRIPTIONEND####
62 //==========================================================================
64 // DOCUMENTATION FROM ORIGINAL FILE:
65 // (some now irrelevant parts elided)
67 //----------------------------------------------------------------------------
70 A version of malloc/free/realloc written by Doug Lea and released to the
71 public domain. Send questions/comments/complaints/performance data
72 to dl at cs.oswego.edu
74 * VERSION 2.6.6 Sun Mar 5 19:10:03 2000 Doug Lea (dl at gee)
76 Note: There may be an updated version of this malloc obtainable at
77 ftp://g.oswego.edu/pub/misc/malloc.c
78 Check before installing!
80 * Why use this malloc?
82 This is not the fastest, most space-conserving, most portable, or
83 most tunable malloc ever written. However it is among the fastest
84 while also being among the most space-conserving, portable and tunable.
85 Consistent balance across these factors results in a good general-purpose
86 allocator. For a high-level description, see
87 http://g.oswego.edu/dl/html/malloc.html
89 * Synopsis of public routines
91 (Much fuller descriptions are contained in the program documentation below.)
93 [ these have of course been renamed in the eCos port ]
96 Return a pointer to a newly allocated chunk of at least n bytes, or null
97 if no space is available.
99 Release the chunk of memory pointed to by p, or no effect if p is null.
100 realloc(Void_t* p, size_t n);
101 Return a pointer to a chunk of size n that contains the same data
102 as does chunk p up to the minimum of (n, p's size) bytes, or null
103 if no space is available. The returned pointer may or may not be
104 the same as p. If p is null, equivalent to malloc. realloc of
105 zero bytes calls free(p)
110 8 byte alignment is currently hardwired into the design. This
111 seems to suffice for all current machines and C compilers.
113 Assumed pointer representation: 4 or 8 bytes
114 Code for 8-byte pointers is untested by me but has worked
115 reliably by Wolfram Gloger, who contributed most of the
116 changes supporting this.
118 Assumed size_t representation: 4 or 8 bytes
119 Note that size_t is allowed to be 4 bytes even if pointers are 8.
121 Minimum overhead per allocated chunk: 4 or 8 bytes
122 Each malloced chunk has a hidden overhead of 4 bytes holding size
123 and status information.
125 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
126 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
128 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
129 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
130 needed; 4 (8) for a trailing size field
131 and 8 (16) bytes for free list pointers. Thus, the minimum
132 allocatable size is 16/24/32 bytes.
134 Even a request for zero bytes (i.e., malloc(0)) returns a
135 pointer to something of the minimum allocatable size.
137 Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes
138 8-byte size_t: 2^63 - 16 bytes
140 It is assumed that (possibly signed) size_t bit values suffice to
141 represent chunk sizes. `Possibly signed' is due to the fact
142 that `size_t' may be defined on a system as either a signed or
143 an unsigned type. To be conservative, values that would appear
144 as negative numbers are avoided.
145 Requests for sizes with a negative sign bit when the request
146 size is treated as a long will return null.
148 Maximum overhead wastage per allocated chunk: normally 15 bytes
150 Alignment demands, plus the minimum allocatable size restriction
151 make the normal worst-case wastage 15 bytes (i.e., up to 15
152 more bytes will be allocated than were requested in malloc), with
153 one exception: because requests for zero bytes allocate non-zero space,
154 the worst case wastage for a request of zero bytes is 24 bytes.
158 Here are some features that are NOT currently supported
160 * No user-definable hooks for callbacks and the like.
161 * No automated mechanism for fully checking that all accesses
162 to malloced memory stay within their bounds.
163 * No support for compaction.
165 * Synopsis of compile-time options:
167 People have reported using previous versions of this malloc on all
168 versions of Unix, sometimes by tweaking some of the defines
169 below. It has been tested most extensively on Solaris and
170 Linux. It is also reported to work on WIN32 platforms.
171 People have also reported adapting this malloc for use in
172 stand-alone embedded systems.
174 The implementation is in straight, hand-tuned ANSI C. Among other
175 consequences, it uses a lot of macros. Because of this, to be at
176 all usable, this code should be compiled using an optimizing compiler
177 (for example gcc -O2) that can simplify expressions and control
180 CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG (default: NOT defined)
181 Define to enable debugging. Adds fairly extensive assertion-based
182 checking to help track down memory errors, but noticeably slows down
184 MALLOC_LOCK (default: NOT defined)
185 MALLOC_UNLOCK (default: NOT defined)
186 Define these to C expressions which are run to lock and unlock
187 the malloc data structures. Calls may be nested; that is,
188 MALLOC_LOCK may be called more than once before the corresponding
189 MALLOC_UNLOCK calls. MALLOC_LOCK must avoid waiting for a lock
190 that it already holds.
191 MALLOC_ALIGNMENT (default: NOT defined)
192 Define this to 16 if you need 16 byte alignment instead of 8 byte alignment
193 which is the normal default.
194 SIZE_T_SMALLER_THAN_LONG (default: NOT defined)
195 Define this when the platform you are compiling has
196 sizeof(long) > sizeof(size_t).
197 The option causes some extra code to be generated to handle operations
198 that use size_t operands and have long results.
199 INTERNAL_SIZE_T (default: size_t)
200 Define to a 32-bit type (probably `unsigned int') if you are on a
201 64-bit machine, yet do not want or need to allow malloc requests of
202 greater than 2^31 to be handled. This saves space, especially for
207 //----------------------------------------------------------------------------
212 #include <pkgconf/memalloc.h> // configuration header
213 #include <pkgconf/infra.h> // CYGDBG_USE_ASSERTS
214 #include <cyg/infra/cyg_type.h> // types
215 #include <cyg/infra/cyg_ass.h> // assertions
216 #include <stddef.h> // for size_t
217 #include <cyg/memalloc/dlmalloc.hxx>
222 Because freed chunks may be overwritten with link fields, this
223 malloc will often die when freed memory is overwritten by user
224 programs. This can be very effective (albeit in an annoying way)
225 in helping track down dangling pointers.
227 If you compile with CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG enabled, a
228 number of assertion checks are
229 enabled that will catch more memory errors. You probably won't be
230 able to make much sense of the actual assertion errors, but they
231 should help you locate incorrectly overwritten memory. The
232 checking is fairly extensive, and will slow down execution
233 noticeably. Calling get_status() with DEBUG set will
234 attempt to check every allocated and free chunk in the
235 course of computing the summaries.
237 Setting CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG may also be helpful if you
238 are trying to modify this code. The assertions in the check routines
239 spell out in more detail the assumptions and invariants underlying
244 #ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
245 # define ASSERT(x) CYG_ASSERTC( x )
247 # define ASSERT(x) ((void)0)
252 Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to
253 lock and unlock the malloc data structures. MALLOC_LOCK may be
261 #ifndef MALLOC_UNLOCK
262 #define MALLOC_UNLOCK
266 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
267 of chunk sizes. On a 64-bit machine, you can reduce malloc
268 overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
269 at the expense of not being able to handle requests greater than
270 2^31. This limitation is hardly ever a concern; you are encouraged
271 to set this. However, the default version is the same as size_t.
274 #ifndef INTERNAL_SIZE_T
275 #define INTERNAL_SIZE_T Cyg_Mempool_dlmalloc_Implementation::Cyg_dlmalloc_size_t
279 Following is needed on implementations whereby long > size_t.
280 The problem is caused because the code performs subtractions of
281 size_t values and stores the result in long values. In the case
282 where long > size_t and the first value is actually less than
283 the second value, the resultant value is positive. For example,
284 (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
285 which is 2^31 - 1 instead of 0xFFFFFFFFFFFFFFFF. This is due to the
286 fact that assignment from unsigned to signed won't sign extend.
289 #ifdef SIZE_T_SMALLER_THAN_LONG
290 #define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) );
292 #define long_sub_size_t(x, y) ( (long)(x - y) )
296 #ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY
298 #include <string.h> // memmove, memset
300 /* The following macros are only invoked with (2n+1)-multiples of
301 INTERNAL_SIZE_T units, with a positive integer n. This is exploited
302 for fast inline execution when n is small. */
304 #define MALLOC_ZERO(charp, nbytes) \
306 INTERNAL_SIZE_T mzsz = (nbytes); \
307 if(mzsz <= 9*sizeof(mzsz)) { \
308 INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \
309 if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \
311 if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \
313 if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \
318 } else memset((charp), 0, mzsz); \
321 #define MALLOC_COPY(dest,src,nbytes) \
323 INTERNAL_SIZE_T mcsz = (nbytes); \
324 if(mcsz <= 9*sizeof(mcsz)) { \
325 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \
326 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \
327 if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \
328 *mcdst++ = *mcsrc++; \
329 if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \
330 *mcdst++ = *mcsrc++; \
331 if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \
332 *mcdst++ = *mcsrc++; }}} \
333 *mcdst++ = *mcsrc++; \
334 *mcdst++ = *mcsrc++; \
336 } else memmove(dest, src, mcsz); \
339 #else /* !CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY */
341 /* Use Duff's device for good zeroing/copying performance. */
343 #define MALLOC_ZERO(charp, nbytes) \
345 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
346 long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \
347 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
349 case 0: for(;;) { *mzp++ = 0; \
350 case 7: *mzp++ = 0; \
351 case 6: *mzp++ = 0; \
352 case 5: *mzp++ = 0; \
353 case 4: *mzp++ = 0; \
354 case 3: *mzp++ = 0; \
355 case 2: *mzp++ = 0; \
356 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
360 #define MALLOC_COPY(dest,src,nbytes) \
362 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
363 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
364 long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \
365 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
367 case 0: for(;;) { *mcdst++ = *mcsrc++; \
368 case 7: *mcdst++ = *mcsrc++; \
369 case 6: *mcdst++ = *mcsrc++; \
370 case 5: *mcdst++ = *mcsrc++; \
371 case 4: *mcdst++ = *mcsrc++; \
372 case 3: *mcdst++ = *mcsrc++; \
373 case 2: *mcdst++ = *mcsrc++; \
374 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
381 //----------------------------------------------------------------------------
384 malloc_chunk details:
386 (The following includes lightly edited explanations by Colin Plumb.)
388 Chunks of memory are maintained using a `boundary tag' method as
389 described in e.g., Knuth or Standish. (See the paper by Paul
390 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
391 survey of such techniques.) Sizes of free chunks are stored both
392 in the front of each chunk and at the end. This makes
393 consolidating fragmented chunks into bigger chunks very fast. The
394 size fields also hold bits representing whether chunks are free or
397 An allocated chunk looks like this:
400 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
401 | Size of previous chunk, if allocated | |
402 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
403 | Size of chunk, in bytes |P|
404 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
405 | User data starts here... .
407 . (malloc_usable_space() bytes) .
409 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
411 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
414 Where "chunk" is the front of the chunk for the purpose of most of
415 the malloc code, but "mem" is the pointer that is returned to the
416 user. "Nextchunk" is the beginning of the next contiguous chunk.
418 Chunks always begin on even word boundaries, so the mem portion
419 (which is returned to the user) is also on an even word boundary, and
420 thus double-word aligned.
422 Free chunks are stored in circular doubly-linked lists, and look like this:
424 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
425 | Size of previous chunk |
426 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
427 `head:' | Size of chunk, in bytes |P|
428 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
429 | Forward pointer to next chunk in list |
430 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
431 | Back pointer to previous chunk in list |
432 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
433 | Unused space (may be 0 bytes long) .
436 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
437 `foot:' | Size of chunk, in bytes |
438 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
440 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
441 chunk size (which is always a multiple of two words), is an in-use
442 bit for the *previous* chunk. If that bit is *clear*, then the
443 word before the current chunk size contains the previous chunk
444 size, and can be used to find the front of the previous chunk.
445 (The very first chunk allocated always has this bit set,
446 preventing access to non-existent (or non-owned) memory.)
448 Note that the `foot' of the current chunk is actually represented
449 as the prev_size of the NEXT chunk. (This makes it easier to
450 deal with alignments etc).
452 The exception to all this is the special chunk `top', which doesn't
453 bother using the trailing size field since there is no next
454 contiguous chunk that would have to index off it. (After
455 initialization, `top' is forced to always exist. )
457 Available chunks are kept in any of several places (all declared below):
459 * `av': An array of chunks serving as bin headers for consolidated
460 chunks. Each bin is doubly linked. The bins are approximately
461 proportionally (log) spaced. There are a lot of these bins
462 (128). This may look excessive, but works very well in
463 practice. All procedures maintain the invariant that no
464 consolidated chunk physically borders another one. Chunks in
465 bins are kept in size order, with ties going to the
466 approximately least recently used chunk.
468 The chunks in each bin are maintained in decreasing sorted order by
469 size. This is irrelevant for the small bins, which all contain
470 the same-sized chunks, but facilitates best-fit allocation for
471 larger chunks. (These lists are just sequential. Keeping them in
472 order almost never requires enough traversal to warrant using
473 fancier ordered data structures.) Chunks of the same size are
474 linked with the most recently freed at the front, and allocations
475 are taken from the back. This results in LRU or FIFO allocation
476 order, which tends to give each chunk an equal opportunity to be
477 consolidated with adjacent freed chunks, resulting in larger free
478 chunks and less fragmentation.
480 * `top': The top-most available chunk (i.e., the one bordering the
481 end of available memory) is treated specially. It is never
482 included in any bin, is used only if no other chunk is
485 * `last_remainder': A bin holding only the remainder of the
486 most recently split (non-top) chunk. This bin is checked
487 before other non-fitting chunks, so as to provide better
488 locality for runs of sequentially allocated chunks.
492 typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mchunkptr;
494 /* sizes, alignments */
496 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
498 #ifdef CYGNUM_MEMALLOC_ALLOCATOR_DLMALLOC_ALIGNMENT
499 #define MALLOC_ALIGNMENT (1<<(CYGNUM_MEMALLOC_ALLOCATOR_DLMALLOC_ALIGNMENT))
502 #ifndef MALLOC_ALIGNMENT
503 #define MALLOC_ALIGN 8
504 #define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ)
506 #define MALLOC_ALIGN MALLOC_ALIGNMENT
508 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
510 (sizeof(struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk))
512 /* conversion from malloc headers to user pointers, and back */
514 #define chunk2mem(p) ((cyg_uint8*)((char*)(p) + 2*SIZE_SZ))
515 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
517 /* pad request bytes into a usable size */
519 #define request2size(req) \
520 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
521 (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
522 (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
524 /* Check if m has acceptable alignment */
526 #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
530 Physical chunk operations
534 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
536 #define PREV_INUSE 0x1
538 /* Bits to mask off when extracting size */
540 #define SIZE_BITS (PREV_INUSE)
543 /* Ptr to next physical malloc_chunk. */
545 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
547 /* Ptr to previous physical malloc_chunk */
549 #define prev_chunk(p)\
550 ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
553 /* Treat space at ptr + offset as a chunk */
555 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
558 Dealing with use bits
561 /* extract p's inuse bit */
564 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
566 /* extract inuse bit of previous chunk */
568 #define prev_inuse(p) ((p)->size & PREV_INUSE)
570 /* set/clear chunk as in use without otherwise disturbing */
572 #define set_inuse(p)\
573 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
575 #define clear_inuse(p)\
576 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
578 /* check/set/clear inuse bits in known places */
580 #define inuse_bit_at_offset(p, s)\
581 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
583 #define set_inuse_bit_at_offset(p, s)\
584 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
586 #define clear_inuse_bit_at_offset(p, s)\
587 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
591 Dealing with size fields
594 /* Get size, ignoring use bits */
596 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
598 /* Set size at head, without disturbing its use bit */
600 #define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
602 /* Set size/use ignoring previous bits in header */
604 #define set_head(p, s) ((p)->size = (s))
606 /* Set size at footer (only when chunk is not in use) */
608 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
611 //----------------------------------------------------------------------------
616 The bins, `av_' are an array of pairs of pointers serving as the
617 heads of (initially empty) doubly-linked lists of chunks, laid out
618 in a way so that each pair can be treated as if it were in a
619 malloc_chunk. (This way, the fd/bk offsets for linking bin heads
620 and chunks are the same).
622 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
623 8 bytes apart. Larger bins are approximately logarithmically
624 spaced. (See the table below.) The `av_' array is never mentioned
625 directly in the code, but instead via bin access macros.
634 2 bins of size 262144
635 1 bin of size what's left
637 There is actually a little bit of slop in the numbers in bin_index
638 for the sake of speed. This makes no difference elsewhere.
640 The special chunks `top' and `last_remainder' get their own bins,
641 (this is implemented via yet more trickery with the av_ array),
642 although `top' is never properly linked to its bin since it is
643 always handled specially.
647 typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mbinptr;
651 #define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
652 #define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
653 #define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
656 The first 2 bins are never indexed. The corresponding av_ cells are instead
657 used for bookkeeping. This is not to save space, but to simplify
658 indexing, maintain locality, and avoid some initialization tests.
661 #define top (bin_at(0)->fd) /* The topmost chunk */
662 #define last_remainder (bin_at(1)) /* remainder from last split */
665 /* Helper macro to initialize bins */
667 #define IAV(i) bin_at(i), bin_at(i)
669 #ifndef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
670 static mbinptr av_[CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV * 2 + 2] = {
672 IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7),
673 IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15),
674 IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23),
675 IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31),
676 IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39),
677 IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47),
678 IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55),
679 IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63),
680 IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71),
681 IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79),
682 IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87),
683 IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95),
684 IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103),
685 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
686 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
687 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
691 /* field-extraction macros */
693 #define first(b) ((b)->fd)
694 #define last(b) ((b)->bk)
700 #define bin_index(sz) \
701 (((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \
702 ((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \
703 ((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \
704 ((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \
705 ((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \
706 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
709 bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold
710 identically sized chunks. This is exploited in malloc.
713 #define MAX_SMALLBIN_SIZE 512
714 #define SMALLBIN_WIDTH 8
715 #define SMALLBIN_WIDTH_BITS 3
716 #define MAX_SMALLBIN (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1
718 #define smallbin_index(sz) (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS)
721 Requests are `small' if both the corresponding and the next bin are small
724 #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
727 To help compensate for the large number of bins, a one-level index
728 structure is used for bin-by-bin searching. `binblocks' is a
729 one-word bitvector recording whether groups of BINBLOCKWIDTH bins
730 have any (possibly) non-empty bins, so they can be skipped over
731 all at once during traversals. The bits are NOT always
732 cleared as soon as all bins in a block are empty, but instead only
733 when all are noticed to be empty during traversal in malloc.
736 #define BINBLOCKWIDTH 4 /* bins per block */
738 #define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */
740 /* bin<->block macros */
742 #define idx2binblock(ix) ((unsigned long)1 << (ix / BINBLOCKWIDTH))
743 #define mark_binblock(ii) (binblocks |= idx2binblock(ii))
744 #define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii)))
747 //----------------------------------------------------------------------------
753 #ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
756 These routines make a number of assertions about the states
757 of data structures that should be true at all times. If any
758 are not true, it's very likely that a user program has somehow
759 trashed memory. (It's also possible that there is a coding error
760 in malloc. In which case, please report it!)
764 Cyg_Mempool_dlmalloc_Implementation::do_check_chunk( mchunkptr p )
766 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
768 /* Check for legal address ... */
769 ASSERT((cyg_uint8 *)p >= arenabase);
771 ASSERT((cyg_uint8 *)p + sz <= (cyg_uint8 *)top);
773 ASSERT((cyg_uint8 *)p + sz <= arenabase + arenasize);
775 } // Cyg_Mempool_dlmalloc_Implementation::do_check_chunk()
779 Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk(mchunkptr p)
781 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
782 mchunkptr next = chunk_at_offset(p, sz);
786 /* Check whether it claims to be free ... */
789 /* Unless a special marker, must have OK fields */
790 if ((long)sz >= (long)MINSIZE)
792 ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
793 ASSERT(aligned_OK(chunk2mem(p)));
794 /* ... matching footer field */
795 ASSERT(next->prev_size == sz);
796 /* ... and is fully consolidated */
797 ASSERT(prev_inuse(p));
798 ASSERT (next == top || inuse(next));
800 /* ... and has minimally sane links */
801 ASSERT(p->fd->bk == p);
802 ASSERT(p->bk->fd == p);
804 else /* markers are always of size SIZE_SZ */
805 ASSERT(sz == SIZE_SZ);
806 } // Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk()
809 Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk(mchunkptr p)
811 mchunkptr next = next_chunk(p);
814 /* Check whether it claims to be in use ... */
817 /* ... and is surrounded by OK chunks.
818 Since more things can be checked with free chunks than inuse ones,
819 if an inuse chunk borders them and debug is on, it's worth doing them.
823 mchunkptr prv = prev_chunk(p);
824 ASSERT(next_chunk(prv) == p);
825 do_check_free_chunk(prv);
829 ASSERT(prev_inuse(next));
830 ASSERT(chunksize(next) >= MINSIZE);
832 else if (!inuse(next))
833 do_check_free_chunk(next);
835 } // Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk(
838 Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk(mchunkptr p,
841 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
842 long room = long_sub_size_t(sz, s);
844 do_check_inuse_chunk(p);
847 ASSERT((long)sz >= (long)MINSIZE);
848 ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
850 ASSERT(room < (long)MINSIZE);
852 /* ... and alignment */
853 ASSERT(aligned_OK(chunk2mem(p)));
856 /* ... and was allocated at front of an available chunk */
857 ASSERT(prev_inuse(p));
859 } // Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk(
862 #define check_free_chunk(P) do_check_free_chunk(P)
863 #define check_inuse_chunk(P) do_check_inuse_chunk(P)
864 #define check_chunk(P) do_check_chunk(P)
865 #define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
867 #define check_free_chunk(P)
868 #define check_inuse_chunk(P)
869 #define check_chunk(P)
870 #define check_malloced_chunk(P,N)
874 //----------------------------------------------------------------------------
877 Macro-based internal utilities
882 Linking chunks in bin lists.
883 Call these only with variables, not arbitrary expressions, as arguments.
887 Place chunk p of size s in its bin, in size order,
888 putting it ahead of others of same size.
892 #define frontlink(P, S, IDX, BK, FD) \
894 if (S < MAX_SMALLBIN_SIZE) \
896 IDX = smallbin_index(S); \
897 mark_binblock(IDX); \
902 FD->bk = BK->fd = P; \
906 IDX = bin_index(S); \
909 if (FD == BK) mark_binblock(IDX); \
912 while (FD != BK && S < chunksize(FD)) FD = FD->fd; \
917 FD->bk = BK->fd = P; \
922 /* take a chunk off a list */
924 #define unlink(P, BK, FD) \
932 /* Place p as the last remainder */
934 #define link_last_remainder(P) \
936 last_remainder->fd = last_remainder->bk = P; \
937 P->fd = P->bk = last_remainder; \
940 /* Clear the last_remainder bin */
942 #define clear_last_remainder \
943 (last_remainder->fd = last_remainder->bk = last_remainder)
946 //----------------------------------------------------------------------------
// Constructor: build a dlmalloc pool over the region [base, base+size).
// The CYG_ADDRWORD argument is accepted for allocator-interface
// compatibility but unused.  Initialises the bin array (or checks for
// illegal multiple instances), aligns the start of the arena so that
// the first returned payload is MALLOC_ALIGNMENT-aligned, and makes
// the whole usable region the initial `top' chunk.
948 Cyg_Mempool_dlmalloc_Implementation::Cyg_Mempool_dlmalloc_Implementation(
949 cyg_uint8 *base, cyg_int32 size,
950 CYG_ADDRWORD /* argthru */ )
955 CYG_ADDRESS front_misalign;
956 cyg_int32 correction;
958 #ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
// Per-instance bin array: point every bin's fd/bk back at the bin
// itself, i.e. all bins start empty.
961 for (i=0; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; i++) {
962 av_[ i*2+2 ] = av_[ i*2+3 ] = bin_at(i);
965 #elif defined(CYGDBG_USE_ASSERTS)
// Without SAFE_MULTIPLE the bin state is shared, so only one pool
// instance may exist; trap a second construction in debug builds.
966 static int instances;
967 if ( ++instances > 1 )
968 CYG_FAIL( "Multiple dlmalloc instances but "
969 "CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE "
// How far is the first payload address (chunk2mem of base) from the
// required alignment?  If misaligned, shift the arena start forward.
973 front_misalign = (CYG_ADDRESS)chunk2mem(base) & MALLOC_ALIGN_MASK;
975 if ( front_misalign > 0 ) {
976 correction = (MALLOC_ALIGNMENT) - front_misalign;
981 // too small to be useful?
982 if ( correction + 2*(unsigned)MALLOC_ALIGNMENT > (unsigned) size )
983 // help catch errors. Don't fail now.
// The entire aligned arena initially forms the single `top' chunk.
// PREV_INUSE is set on top so it never consolidates backward past the
// arena base.  NOTE(review): the arenasize/arenabase assignments fall
// on lines elided from this view -- confirm against the full file.
986 top = (mchunkptr)(base + correction);
987 set_head(top, arenasize | PREV_INUSE);
991 //----------------------------------------------------------------------------
993 /* Main public routines */
998 The requested size is first converted into a usable form, `nb'.
999 This currently means to add 4 bytes overhead plus possibly more to
1000 obtain 8-byte alignment and/or to obtain a size of at least
1001 MINSIZE (currently 16 bytes), the smallest allocatable size.
1002 (All fits are considered `exact' if they are within MINSIZE bytes.)
1004 From there, the first successful of the following steps is taken:
1006 1. The bin corresponding to the request size is scanned, and if
1007 a chunk of exactly the right size is found, it is taken.
1009 2. The most recently remaindered chunk is used if it is big
1010 enough. This is a form of (roving) first fit, used only in
1011 the absence of exact fits. Runs of consecutive requests use
1012 the remainder of the chunk used for the previous such request
1013 whenever possible. This limited use of a first-fit style
1014 allocation strategy tends to give contiguous chunks
1015 coextensive lifetimes, which improves locality and can reduce
1016 fragmentation in the long run.
1018 3. Other bins are scanned in increasing size order, using a
1019 chunk big enough to fulfill the request, and splitting off
1020 any remainder. This search is strictly by best-fit; i.e.,
1021 the smallest (with ties going to approximately the least
1022 recently used) chunk that fits is selected.
1024 4. If large enough, the chunk bordering the end of memory
1025 (`top') is split off. (This use of `top' is in accord with
1026 the best-fit search rule. In effect, `top' is treated as
1027 larger (and thus less well fitting) than any other available
1028 chunk since it can be extended to be as large as necessary
1029 (up to system limitations).
1031 All allocations are made from the `lowest' part of any found
1032 chunk. (The implementation invariant is that prev_inuse is
1033 always true of any allocated chunk; i.e., that each allocated
1034 chunk borders either a previously allocated and still in-use chunk,
1035 or the base of its memory arena.)
// try_alloc(bytes): single attempt to allocate `bytes' bytes from this
// pool.  Returns a pointer to user memory, or NULL if nothing suitable
// is available (no waiting/retrying happens at this layer).  Search
// order, per the algorithm description above: exact-fit bin scan, the
// last split-off remainder, best-fit scan of larger bins in
// BINBLOCKWIDTH units, and finally the `top' chunk.
1040 Cyg_Mempool_dlmalloc_Implementation::try_alloc( cyg_int32 bytes )
1042 mchunkptr victim; /* inspected/selected chunk */
1043 INTERNAL_SIZE_T victim_size; /* its size */
1044 int idx; /* index for bin traversal */
1045 mbinptr bin; /* associated bin */
1046 mchunkptr remainder; /* remainder from a split */
1047 long remainder_size; /* its size */
1048 int remainder_index; /* its bin index */
1049 unsigned long block; /* block traverser bit */
1050 int startidx; /* first bin of a traversed block */
1051 mchunkptr fwd; /* misc temp for linking */
1052 mchunkptr bck; /* misc temp for linking */
1053 mbinptr q; /* misc temp */
1057 /* Allow uninitialised (zero sized) heaps because they could exist as a
1058 * quirk of the MLT setup where a dynamically sized heap is at the top of
1060 if (NULL==arenabase) return NULL;
// Negative request: refuse (guards against signed/unsigned confusion
// when the request is padded below).
1062 if ((long)bytes < 0) return 0;
1064 nb = request2size(bytes); /* padded request size; */
1068 /* Check for exact match in a bin */
1070 if (is_small_request(nb)) /* Faster version for small requests */
1072 idx = smallbin_index(nb);
1074 /* No traversal or size check necessary for small bins. */
1079 #if MALLOC_ALIGN != 16
1080 /* Also scan the next one, since it would have a remainder < MINSIZE */
// NOTE(review): the selection of `victim' from the small bin falls on
// lines elided from this view; on this path a usable chunk was found.
1089 victim_size = chunksize(victim);
1090 unlink(victim, bck, fwd);
1091 set_inuse_bit_at_offset(victim, victim_size);
1092 check_malloced_chunk(victim, nb);
1094 return chunk2mem(victim);
1097 idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
1102 idx = bin_index(nb);
// Walk the request's own bin: take an exact fit immediately; a chunk
// with a MINSIZE-or-larger surplus is deferred to the best-fit scan.
1105 for (victim = last(bin); victim != bin; victim = victim->bk)
1107 victim_size = chunksize(victim);
1108 remainder_size = long_sub_size_t(victim_size, nb);
1110 if (remainder_size >= (long)MINSIZE) /* too big */
1112 --idx; /* adjust to rescan below after checking last remainder */
1116 else if (remainder_size >= 0) /* exact fit */
1118 unlink(victim, bck, fwd);
1119 set_inuse_bit_at_offset(victim, victim_size);
1120 check_malloced_chunk(victim, nb);
1122 return chunk2mem(victim);
1130 /* Try to use the last split-off remainder */
1132 if ( (victim = last_remainder->fd) != last_remainder)
1134 victim_size = chunksize(victim);
1135 remainder_size = long_sub_size_t(victim_size, nb);
1137 if (remainder_size >= (long)MINSIZE) /* re-split */
1139 remainder = chunk_at_offset(victim, nb);
1140 set_head(victim, nb | PREV_INUSE);
1141 link_last_remainder(remainder);
1142 set_head(remainder, remainder_size | PREV_INUSE);
1143 set_foot(remainder, remainder_size);
1144 check_malloced_chunk(victim, nb);
1146 return chunk2mem(victim);
// The remainder cannot be re-split: detach it, then either hand it out
// whole (exhaust) or return it to a regular bin.
1149 clear_last_remainder;
1151 if (remainder_size >= 0) /* exhaust */
1153 set_inuse_bit_at_offset(victim, victim_size);
1154 check_malloced_chunk(victim, nb);
1156 return chunk2mem(victim);
1159 /* Else place in bin */
1161 frontlink(victim, victim_size, remainder_index, bck, fwd);
1165 If there are any possibly nonempty big-enough blocks,
1166 search for best fitting chunk by scanning bins in blockwidth units.
1169 if ( (block = idx2binblock(idx)) <= binblocks)
1172 /* Get to the first marked block */
1174 if ( (block & binblocks) == 0)
1176 /* force to an even block boundary */
1177 idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
1179 while ((block & binblocks) == 0)
1181 idx += BINBLOCKWIDTH;
1186 /* For each possibly nonempty block ... */
1189 startidx = idx; /* (track incomplete blocks) */
1190 q = bin = bin_at(idx);
1192 /* For each bin in this block ... */
1195 /* Find and use first big enough chunk ... */
1197 for (victim = last(bin); victim != bin; victim = victim->bk)
1199 victim_size = chunksize(victim);
1200 remainder_size = long_sub_size_t(victim_size, nb);
1202 if (remainder_size >= (long)MINSIZE) /* split */
1204 remainder = chunk_at_offset(victim, nb);
1205 set_head(victim, nb | PREV_INUSE);
1206 unlink(victim, bck, fwd);
// The split-off surplus becomes the new last_remainder.
1207 link_last_remainder(remainder);
1208 set_head(remainder, remainder_size | PREV_INUSE);
1209 set_foot(remainder, remainder_size);
1210 check_malloced_chunk(victim, nb);
1212 return chunk2mem(victim);
1215 else if (remainder_size >= 0) /* take */
1217 set_inuse_bit_at_offset(victim, victim_size);
1218 unlink(victim, bck, fwd);
1219 check_malloced_chunk(victim, nb);
1221 return chunk2mem(victim);
1226 bin = next_bin(bin);
1228 #if MALLOC_ALIGN == 16
1229 if (idx < MAX_SMALLBIN)
1231 bin = next_bin(bin);
1235 } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
1237 /* Clear out the block bit. */
1239 do /* Possibly backtrack to try to clear a partial block */
1241 if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
1243 binblocks &= ~block;
1248 } while (first(q) == q);
1250 /* Get to the next possibly nonempty block */
1252 if ( (block <<= 1) <= binblocks && (block != 0) )
1254 while ((block & binblocks) == 0)
1256 idx += BINBLOCKWIDTH;
1266 /* Try to use top chunk */
1268 /* Require that there be a remainder, ensuring top always exists */
1269 remainder_size = long_sub_size_t(chunksize(top), nb);
1270 if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
1272 //diag_printf("chunksize(top)=%ld, nb=%d, remainder=%ld\n", chunksize(top),
1273 // nb, remainder_size);
1275 CYG_MEMALLOC_FAIL(bytes);
1276 return NULL; /* propagate failure */
// Split the request off the front of top; the rest becomes the new top.
// NOTE(review): the assignment of `victim' (presumably = old top) falls
// on a line elided from this view -- confirm against the full file.
1280 set_head(victim, nb | PREV_INUSE);
1281 top = chunk_at_offset(victim, nb);
1282 set_head(top, remainder_size | PREV_INUSE);
1283 check_malloced_chunk(victim, nb);
1285 return chunk2mem(victim);
1287 } // Cyg_Mempool_dlmalloc_Implementation::try_alloc()
1289 //----------------------------------------------------------------------------
1296 1. free(NULL) has no effect.
1298 2. Chunks are consolidated as they arrive, and
1299 placed in corresponding bins. (This includes the case of
1300 consolidating with the current `last_remainder').
// free(mem, <size ignored>): return the chunk owning `mem' to the pool.
// Neighbouring free chunks are consolidated immediately: a chunk
// bordering `top' is merged into top, otherwise the consolidated chunk
// is front-linked into its size bin (or kept/re-inserted as the
// last_remainder).  free(NULL) has no effect.
1304 Cyg_Mempool_dlmalloc_Implementation::free( cyg_uint8 *mem, cyg_int32 )
1306 mchunkptr p; /* chunk corresponding to mem */
1307 INTERNAL_SIZE_T hd; /* its head field */
1308 INTERNAL_SIZE_T sz; /* its size */
1309 int idx; /* its bin index */
1310 mchunkptr next; /* next contiguous chunk */
1311 INTERNAL_SIZE_T nextsz; /* its size */
1312 INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
1313 mchunkptr bck; /* misc temp for linking */
1314 mchunkptr fwd; /* misc temp for linking */
1315 int islr; /* track whether merging with last_remainder */
1317 if (mem == NULL) /* free(NULL) has no effect */
1325 check_inuse_chunk(p);
// Chunk size is the head field with the PREV_INUSE flag masked off.
1327 sz = hd & ~PREV_INUSE;
1328 next = chunk_at_offset(p, sz);
1329 nextsz = chunksize(next);
1331 if (next == top) /* merge with top */
1335 if (!(hd & PREV_INUSE)) /* consolidate backward */
1337 prevsz = p->prev_size;
1338 p = chunk_at_offset(p, -((long) prevsz));
1340 unlink(p, bck, fwd);
// The merged chunk becomes the new top; PREV_INUSE stays set because
// top's predecessor is, by invariant, in use (or the arena base).
1343 set_head(p, sz | PREV_INUSE);
// Not adjacent to top: mark p free by clearing the inuse bit held in
// the following chunk's header, then consolidate with free neighbours.
1349 set_head(next, nextsz); /* clear inuse bit */
1353 if (!(hd & PREV_INUSE)) /* consolidate backward */
1355 prevsz = p->prev_size;
1356 p = chunk_at_offset(p, -((long) prevsz));
1359 if (p->fd == last_remainder) /* keep as last_remainder */
1362 unlink(p, bck, fwd);
1365 if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */
1369 if (!islr && next->fd == last_remainder) /* re-insert last_remainder */
1372 link_last_remainder(p);
1375 unlink(next, bck, fwd);
1379 set_head(p, sz | PREV_INUSE);
// If p was kept/re-inserted as last_remainder it is already linked
// (islr paths above); otherwise front-link it into its size bin.
1382 frontlink(p, sz, idx, bck, fwd);
1387 } // Cyg_Mempool_dlmalloc_Implementation::free()
1389 //----------------------------------------------------------------------------
1391 // resize existing allocation, if oldsize is non-NULL, previous
1392 // allocation size is placed into it. If previous size not available,
1393 // it is set to 0. NB previous allocation size may have been rounded up.
1394 // Occasionally the allocation can be adjusted *backwards* as well as,
1395 // or instead of forwards, therefore the address of the resized
1396 // allocation is returned, or NULL if no resizing was possible.
1397 // Note that this differs from ::realloc() in that no attempt is
1398 // made to call malloc() if resizing is not possible - that is left
1399 // to higher layers. The data is copied from old to new though.
1400 // The effects of alloc_ptr==NULL or newsize==0 are undefined
1403 // DOCUMENTATION FROM ORIGINAL FILE:
1404 // (some now irrelevant parts elided)
1408 If the reallocation is for additional space, and the
1409 chunk can be extended, it is, else a malloc-copy-free sequence is
1410 taken. There are several different ways that a chunk could be
1411 extended. All are tried:
1413 * Extending forward into following adjacent free chunk.
1414 * Shifting backwards, joining preceding adjacent space
1415 * Both shifting backwards and extending forward.
1417 If the reallocation is for less space, and the new request is for
1418 a `small' (<512 bytes) size, then the newly unused space is lopped
1421 The old unix realloc convention of allowing the last-free'd chunk
1422 to be used as an argument to realloc is no longer supported.
1423 I don't know of any programs still relying on this feature,
1424 and allowing it would also allow too many other incorrect
1425 usages of realloc to be sensible.
// resize_alloc(oldmem, bytes, poldsize): resize an existing allocation
// in place, per the contract documented above: grow forward into a
// free neighbour (or top), shift backward into free space before the
// chunk, or both; shrink by splitting off the surplus.  Returns the
// (possibly moved) user pointer, or NULL if no in-place resize was
// possible -- no malloc-copy-free fallback is attempted here.  If
// poldsize is non-NULL it receives the previous usable size.
1429 Cyg_Mempool_dlmalloc_Implementation::resize_alloc( cyg_uint8 *oldmem,
1431 cyg_int32 *poldsize )
1434 INTERNAL_SIZE_T nb; /* padded request size */
1436 mchunkptr oldp; /* chunk corresponding to oldmem */
1437 INTERNAL_SIZE_T oldsize; /* its size */
1439 mchunkptr newp; /* chunk to return */
1440 INTERNAL_SIZE_T newsize; /* its size */
1441 cyg_uint8* newmem; /* corresponding user mem */
1443 mchunkptr next; /* next contiguous chunk after oldp */
1444 INTERNAL_SIZE_T nextsize; /* its size */
1446 mchunkptr prev; /* previous contiguous chunk before oldp */
1447 INTERNAL_SIZE_T prevsize; /* its size */
1449 mchunkptr remainder; /* holds split off extra space from newp */
1450 INTERNAL_SIZE_T remainder_size; /* its size */
1452 mchunkptr bck; /* misc temp for linking */
1453 mchunkptr fwd; /* misc temp for linking */
1457 newp = oldp = mem2chunk(oldmem);
1458 newsize = oldsize = chunksize(oldp);
// Report the previous usable size (chunk size less header overhead).
1460 if (NULL != poldsize)
1461 *poldsize = oldsize - SIZE_SZ;
1463 nb = request2size(bytes);
1465 check_inuse_chunk(oldp);
// Growing: try each in-place strategy in turn; shrinking (or a
// same-size request) falls straight through to the split below.
1467 if ((long)(oldsize) < (long)(nb))
1470 /* Try expanding forward */
1472 next = chunk_at_offset(oldp, oldsize);
1473 if (next == top || !inuse(next))
1475 nextsize = chunksize(next);
1477 /* Forward into top only if a remainder */
1480 if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
1482 newsize += nextsize;
1483 top = chunk_at_offset(oldp, nb);
1484 set_head(top, (newsize - nb) | PREV_INUSE);
1485 set_head_size(oldp, nb);
1487 return chunk2mem(oldp);
1491 /* Forward into next chunk */
1492 else if (((long)(nextsize + newsize) >= (long)(nb)))
1494 unlink(next, bck, fwd);
1495 newsize += nextsize;
1505 /* Try shifting backwards. */
1507 if (!prev_inuse(oldp))
1509 prev = prev_chunk(oldp);
1510 prevsize = chunksize(prev);
1512 /* try forward + backward first to save a later consolidation */
// Backward + forward into top: move the data down into prev, then
// let the surplus beyond nb become the new top.
1519 if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
1521 unlink(prev, bck, fwd);
1523 newsize += prevsize + nextsize;
1524 newmem = chunk2mem(newp);
1525 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1526 top = chunk_at_offset(newp, nb);
1527 set_head(top, (newsize - nb) | PREV_INUSE);
1528 set_head_size(newp, nb);
1534 /* into next chunk */
1535 else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
1537 unlink(next, bck, fwd);
1538 unlink(prev, bck, fwd);
1540 newsize += nextsize + prevsize;
1541 newmem = chunk2mem(newp);
1542 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
// Backward only: absorb just the preceding free chunk.
1548 if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
1550 unlink(prev, bck, fwd);
1552 newsize += prevsize;
1553 newmem = chunk2mem(newp);
1554 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1559 // couldn't resize the allocation any direction, so return failure
1565 split: /* split off extra room in old or expanded chunk */
1567 remainder_size = long_sub_size_t(newsize, nb);
1569 if (remainder_size >= (long)MINSIZE) /* split off remainder */
1571 remainder = chunk_at_offset(newp, nb);
1572 set_head_size(newp, nb);
1573 set_head(remainder, remainder_size | PREV_INUSE);
1574 set_inuse_bit_at_offset(remainder, remainder_size);
1575 /* let free() deal with it */
1576 Cyg_Mempool_dlmalloc_Implementation::free( chunk2mem(remainder) );
// Surplus smaller than MINSIZE cannot stand alone: keep it in newp.
1580 set_head_size(newp, newsize);
1581 set_inuse_bit_at_offset(newp, newsize);
1584 check_inuse_chunk(newp);
1586 return chunk2mem(newp);
1588 } // Cyg_Mempool_dlmalloc_Implementation::resize_alloc()
1590 //----------------------------------------------------------------------------
1592 // Get memory pool status
1593 // flags is a bitmask of requested fields to fill in. The flags are
1594 // defined in common.hxx
// get_status(flags, status): fill in the fields of `status' requested
// by the `flags' bitmask (flags defined in common.hxx).  Free-memory
// statistics require walking every bin's free list plus `top'; the
// remaining fields are cheap bookkeeping set unconditionally.
1596 Cyg_Mempool_dlmalloc_Implementation::get_status(
1597 cyg_mempool_status_flag_t flags,
1598 Cyg_Mempool_Status &status )
// Only walk the heap when some free/allocated statistic was asked for.
1600 if (0 != (flags&(CYG_MEMPOOL_STAT_FREEBLOCKS|CYG_MEMPOOL_STAT_TOTALFREE|
1601 CYG_MEMPOOL_STAT_TOTALALLOCATED|CYG_MEMPOOL_STAT_MAXFREE)))
1606 cyg_int32 chunksizep;
1608 #ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
// Start the tally from the top chunk, then add every free chunk found
// in the bins (scan starts at bin 1).
1612 INTERNAL_SIZE_T avail = chunksize(top);
1613 int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
1616 for (i = 1; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; ++i) {
1618 for (p = last(b); p != b; p = p->bk) {
1619 #ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
// Debug builds also sanity-check each free chunk and the run of
// in-use chunks that follows it.
1620 check_free_chunk(p);
1621 for (q = next_chunk(p);
1622 (q < top) && inuse(q) &&
1623 (long)(chunksize(q)) >= (long)MINSIZE;
1625 check_inuse_chunk(q);
1627 chunksizep = chunksize(p);
1628 avail += chunksizep;
1629 if ( chunksizep > maxfree )
1630 maxfree = chunksizep;
1635 if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) )
1636 status.totalallocated = arenasize - avail;
1637 // as quick or quicker to just set most of these, rather than
// Report conservatively usable sizes: round down to the allocation
// alignment and subtract per-chunk overhead.
1639 status.totalfree = (avail & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
1640 CYG_ASSERT( ((avail + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1641 >= MINSIZE, "free mem negative!" );
1642 status.freeblocks = navail;
1643 status.maxfree = (maxfree & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
1644 //diag_printf("raw mf: %d, ret mf: %d\n", maxfree, status.maxfree);
1645 CYG_ASSERT( ((maxfree + SIZE_SZ + MALLOC_ALIGN_MASK) &
1646 ~MALLOC_ALIGN_MASK) >= MINSIZE,
1647 "max free block size negative!" );
1650 // as quick or quicker to just set most of these, rather than
// These fields cost nothing to compute, so set them regardless of
// which flags were requested.
1652 status.arenabase = status.origbase = arenabase;
1653 status.arenasize = status.origsize = arenasize;
1654 status.maxoverhead = MINSIZE + MALLOC_ALIGNMENT;
1656 } // Cyg_Mempool_dlmalloc_Implementation::get_status()
1659 //----------------------------------------------------------------------------