modules/up/src/Core/gnu/malloc.h



   1 /* ---------- To make a malloc.h, start cutting here ------------ */
   2 
   3 /* 
   4   A version of malloc/free/realloc written by Doug Lea and released to the 
   5   public domain.  Send questions/comments/complaints/performance data
   6   to dl@cs.oswego.edu
   7 
   8 * VERSION 2.6.5  Wed Jun 17 15:55:16 1998  Doug Lea  (dl at gee)
   9   
  10    Note: There may be an updated version of this malloc obtainable at
  11            ftp://g.oswego.edu/pub/misc/malloc.c
  12          Check before installing!
  13 
  14    Note: This version differs from 2.6.4 only by correcting a
  15          statement ordering error that could cause failures only
  16          when calls to this malloc are interposed with calls to
  17          other memory allocators.
  18 
  19 * Why use this malloc?
  20 
  21   This is not the fastest, most space-conserving, most portable, or
  22   most tunable malloc ever written. However it is among the fastest
  23   while also being among the most space-conserving, portable and tunable.
  24   Consistent balance across these factors results in a good general-purpose 
  25   allocator. For a high-level description, see 
  26      http://g.oswego.edu/dl/html/malloc.html
  27 
  28 * Synopsis of public routines
  29 
  30   (Much fuller descriptions are contained in the program documentation below.)
  31 
  32   malloc(size_t n);
  33      Return a pointer to a newly allocated chunk of at least n bytes, or null
  34      if no space is available.
  35   free(Void_t* p);
  36      Release the chunk of memory pointed to by p; has no effect if p is null.
  37   realloc(Void_t* p, size_t n);
  38      Return a pointer to a chunk of size n that contains the same data
  39      as does chunk p up to the minimum of (n, p's size) bytes, or null
  40      if no space is available. The returned pointer may or may not be
  41      the same as p. If p is null, equivalent to malloc.  Unless the
  42      #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
  43      size argument of zero (re)allocates a minimum-sized chunk.
  44   memalign(size_t alignment, size_t n);
  45      Return a pointer to a newly allocated chunk of n bytes, aligned
  46      in accord with the alignment argument, which must be a power of
  47      two.
  48   valloc(size_t n);
  49      Equivalent to memalign(pagesize, n), where pagesize is the page
  50      size of the system (or as near to this as can be figured out from
  51      all the includes/defines below.)
  52   pvalloc(size_t n);
  53      Equivalent to valloc(minimum-page-that-holds(n)), that is,
  54      round up n to nearest pagesize.
  55   calloc(size_t unit, size_t quantity);
  56      Returns a pointer to quantity * unit bytes, with all locations
  57      set to zero.
  58   cfree(Void_t* p);
  59      Equivalent to free(p).
  60   malloc_trim(size_t pad);
  61      Release all but pad bytes of freed top-most memory back 
  62      to the system. Return 1 if successful, else 0.
  63   malloc_usable_size(Void_t* p);
  64      Report the number of usable allocated bytes associated with allocated
  65      chunk p. This may or may not report more bytes than were requested,
  66      due to alignment and minimum size constraints.
  67   malloc_stats();
  68      Prints brief summary statistics on stderr.
  69   mallinfo()
  70      Returns (by copy) a struct containing various summary statistics.
  71   mallopt(int parameter_number, int parameter_value)
  72      Changes one of the tunable parameters described below. Returns
  73      1 if successful in changing the parameter, else 0.
  74 
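  For example (an illustrative sketch only; error checking abbreviated):

     char* buf = (char*) malloc(100);       at least 100 usable bytes
     buf = (char*) realloc(buf, 200);       grown; old contents preserved
     free(buf);                             released
     void* p = memalign(64, 256);           start address a multiple of 64
     free(p);
     mallopt(M_TRIM_THRESHOLD, 256*1024);   adjust a tunable parameter
     malloc_stats();                        brief summary to stderr
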
  75 * Vital statistics:
  76 
  77   Alignment:                            8-byte
  78        8 byte alignment is currently hardwired into the design.  This
  79        seems to suffice for all current machines and C compilers.
  80 
  81   Assumed pointer representation:       4 or 8 bytes
  82        Code for 8-byte pointers is untested by me but has worked
  83        reliably for Wolfram Gloger, who contributed most of the
  84        changes supporting this.
  85 
  86   Assumed size_t  representation:       4 or 8 bytes
  87        Note that size_t is allowed to be 4 bytes even if pointers are 8.        
  88 
  89   Minimum overhead per allocated chunk: 4 or 8 bytes
  90        Each malloced chunk has a hidden overhead of 4 bytes holding size
  91        and status information.  
  92 
  93   Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
  94                           8-byte ptrs:  24/32 bytes (including 4/8 overhead)
  95                                      
  96        When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
  97        ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
  98        needed; 4 (8) for a trailing size field
  99        and 8 (16) bytes for free list pointers. Thus, the minimum
 100        allocatable size is 16/24/32 bytes.
 101 
 102        Even a request for zero bytes (i.e., malloc(0)) returns a
 103        pointer to something of the minimum allocatable size.
 104 
 105   Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
 106                           8-byte size_t: 2^63 - 16 bytes
 107 
 108        It is assumed that (possibly signed) size_t bit values suffice to
 109        represent chunk sizes. `Possibly signed' is due to the fact
 110        that `size_t' may be defined on a system as either a signed or
 111        an unsigned type. To be conservative, values that would appear
 112        as negative numbers are avoided.  
 113        Requests for sizes with a negative sign bit will return a
 114        minimum-sized chunk.
 115 
 116   Maximum overhead wastage per allocated chunk: normally 15 bytes
 117 
 118        Alignment demands, plus the minimum allocatable size restriction
 119        make the normal worst-case wastage 15 bytes (i.e., up to 15
 120        more bytes will be allocated than were requested in malloc), with 
 121        two exceptions:
 122          1. Because requests for zero bytes allocate non-zero space,
 123             the worst case wastage for a request of zero bytes is 24 bytes.
 124          2. For requests >= mmap_threshold that are serviced via
 125             mmap(), the worst case wastage is 8 bytes plus the remainder
 126             from a system page (the minimal mmap unit); typically 4096 bytes.
 127 
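  As a worked illustration (assuming 4-byte size_t, so 4 bytes of
  overhead, 8-byte alignment, and a 16-byte minimum chunk):

     malloc(1)    consumes a 16-byte chunk: the worst-case 15 bytes
                  beyond the request
     malloc(100)  consumes a 104-byte chunk: 100 + 4 overhead, already
                  a multiple of 8, so no alignment padding
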
 128 * Limitations
 129 
 130     Here are some features that are NOT currently supported
 131 
 132     * No user-definable hooks for callbacks and the like.
 133     * No automated mechanism for fully checking that all accesses
 134       to malloced memory stay within their bounds.
 135     * No support for compaction.
 136 
 137 * Synopsis of compile-time options:
 138 
 139     People have reported using previous versions of this malloc on all
 140     versions of Unix, sometimes by tweaking some of the defines
 141     below. It has been tested most extensively on Solaris and
 142     Linux. It is also reported to work on WIN32 platforms.
 143     People have also reported adapting this malloc for use in
 144     stand-alone embedded systems.
 145 
 146     The implementation is in straight, hand-tuned ANSI C.  Among other
 147     consequences, it uses a lot of macros.  Because of this, to be at
 148     all usable, this code should be compiled using an optimizing compiler
 149     (for example gcc -O2) that can simplify expressions and control
 150     paths.
 151 
 152   __STD_C                  (default: derived from C compiler defines)
 153      Nonzero if using an ANSI-standard C compiler, a C++ compiler, or
 154      a C compiler sufficiently close to ANSI to get away with it.
 155   DEBUG                    (default: NOT defined)
 156      Define to enable debugging. Adds fairly extensive assertion-based 
 157      checking to help track down memory errors, but noticeably slows down
 158      execution.
 159   REALLOC_ZERO_BYTES_FREES (default: NOT defined) 
 160      Define this if you think that realloc(p, 0) should be equivalent
 161      to free(p). Otherwise, since malloc returns a unique pointer for
 162      malloc(0), so does realloc(p, 0).
 163   HAVE_MEMCPY               (default: defined)
 164      Define if you are not otherwise using ANSI STD C, but still 
 165      have memcpy and memset in your C library and want to use them.
 166      Otherwise, simple internal versions are supplied.
 167   USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
 168      Define as 1 if you want the C library versions of memset and
 169      memcpy called in realloc and calloc (otherwise macro versions are used). 
 170      At least on some platforms, the simple macro versions usually
 171      outperform libc versions.
 172   HAVE_MMAP                 (default: defined as 1)
 173      Define to non-zero to optionally make malloc() use mmap() to
 174      allocate very large blocks.  
 175   HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
 176      Define to non-zero to optionally make realloc() use mremap() to
 177      reallocate very large blocks.  
 178   malloc_getpagesize        (default: derived from system #includes)
 179      Either a constant or routine call returning the system page size.
 180   HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined) 
 181      Optionally define if you are on a system with a /usr/include/malloc.h
 182      that declares struct mallinfo. It is not at all necessary to
 183      define this even if you do, but doing so will ensure consistency.
 184   INTERNAL_SIZE_T           (default: size_t)
 185      Define to a 32-bit type (probably `unsigned int') if you are on a 
 186      64-bit machine, yet do not want or need to allow malloc requests of 
 187      greater than 2^31 to be handled. This saves space, especially for
 188      very small chunks.
 189   INTERNAL_LINUX_C_LIB      (default: NOT defined)
 190      Defined only when compiled as part of Linux libc.
 191      Also note that there is some odd internal name-mangling via defines
 192      (for example, internally, `malloc' is named `mALLOc') needed
 193      when compiling in this case. These look funny but don't otherwise
 194      affect anything.
 195   WIN32                     (default: undefined)
 196      Define this on MS win (95, nt) platforms to compile in sbrk emulation.
 197   LACKS_UNISTD_H            (default: undefined)
 198      Define this if your system does not have a <unistd.h>.
 199   MORECORE                  (default: sbrk)
 200      The name of the routine to call to obtain more memory from the system.
 201   MORECORE_FAILURE          (default: -1)
 202      The value returned upon failure of MORECORE.
 203   MORECORE_CLEARS           (default: 1)
 204      True (1) if the routine mapped to MORECORE zeroes out memory (which
 205      holds for sbrk).
 206   DEFAULT_TRIM_THRESHOLD
 207   DEFAULT_TOP_PAD       
 208   DEFAULT_MMAP_THRESHOLD
 209   DEFAULT_MMAP_MAX      
 210      Default values of tunable parameters (described in detail below)
 211      controlling interaction with host system routines (sbrk, mmap, etc).
 212      These values may also be changed dynamically via mallopt(). The
 213      preset defaults are those that give best performance for typical
 214      programs/systems.
 215 
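  As an example, a hypothetical embedded build might combine several of
  these switches on the compiler command line (the names shown, such as
  my_sbrk, are illustrative only):

     cc -O2 -DLACKS_UNISTD_H -DHAVE_MMAP=0 -DMORECORE=my_sbrk -c malloc.c

  where my_sbrk is a user-supplied sbrk-like routine that returns
  MORECORE_FAILURE (-1) when no more memory can be obtained.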
 216 
 217 */
 218 
 219 
 220 
 221 
 222 /* Preliminaries */
 223 
 224 #ifndef __STD_C
 225 #ifdef __STDC__
 226 #define __STD_C     1
 227 #else
 228 #if __cplusplus
 229 #define __STD_C     1
 230 #else
 231 #define __STD_C     0
 232 #endif /*__cplusplus*/
 233 #endif /*__STDC__*/
 234 #endif /*__STD_C*/
 235 
 236 #ifndef Void_t
 237 #if __STD_C
 238 #define Void_t      void
 239 #else
 240 #define Void_t      char
 241 #endif
 242 #endif /*Void_t*/
 243 
 244 #if __STD_C
 245 #include <stddef.h>   /* for size_t */
 246 #else
 247 #include <sys/types.h>
 248 #endif
 249 
 250 #ifdef __cplusplus
 251 extern "C" {
 252 #endif
 253 
 254 #include <stdio.h>    /* needed for malloc_stats */
 255 
 256 
 257 /*
 258   Compile-time options
 259 */
 260 
 261 
 262 /*
 263     Debugging:
 264 
 265     Because freed chunks may be overwritten with link fields, this
 266     malloc will often die when freed memory is overwritten by user
 267     programs.  This can be very effective (albeit in an annoying way)
 268     in helping track down dangling pointers.
 269 
 270     If you compile with -DDEBUG, a number of assertion checks are
 271     enabled that will catch more memory errors. You probably won't be
 272     able to make much sense of the actual assertion errors, but they
 273     should help you locate incorrectly overwritten memory.  The
 274     checking is fairly extensive, and will slow down execution
 275     noticeably. Calling malloc_stats or mallinfo with DEBUG set will
 276     attempt to check every non-mmapped allocated and free chunk in the
 277     course of computing the summaries. (By nature, mmapped regions
 278     cannot be checked very much automatically.)
 279 
 280     Setting DEBUG may also be helpful if you are trying to modify 
 281     this code. The assertions in the check routines spell out in more 
 282     detail the assumptions and invariants underlying the algorithms.
 283 
 284 */
 285 
 286 #if DEBUG 
 287 #include <assert.h>
 288 #else
 289 #define assert(x) ((void)0)
 290 #endif
 291 
 292 
 293 /*
 294   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
 295   of chunk sizes. On a 64-bit machine, you can reduce malloc
 296   overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
 297   at the expense of not being able to handle requests greater than
 298   2^31. This limitation is hardly ever a concern; you are encouraged
 299   to set this. However, the default version is the same as size_t.
 300 */
 301 
 302 #ifndef INTERNAL_SIZE_T
 303 #define INTERNAL_SIZE_T size_t
 304 #endif
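
/*
  For example, a 64-bit build that will never see requests of 2^31 bytes
  or more could be compiled with (an illustrative invocation only):

      cc -O2 "-DINTERNAL_SIZE_T=unsigned int" -c malloc.c
*/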
 305 
 306 /*
 307   REALLOC_ZERO_BYTES_FREES should be set if a call to
 308   realloc with zero bytes should be the same as a call to free.
 309   Some people think it should. Otherwise, since this malloc
 310   returns a unique pointer for malloc(0), so does realloc(p, 0). 
 311 */
 312 
 313 
 314 /*   #define REALLOC_ZERO_BYTES_FREES */
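
/*
  For example (an illustrative fragment; the outcome depends on the
  setting above):

      void* p = malloc(10);
      p = realloc(p, 0);   frees p and yields 0 if REALLOC_ZERO_BYTES_FREES
                           is defined; otherwise returns a minimum-sized chunk
*/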
 315 
 316 
 317 /* 
 318   WIN32 causes an emulation of sbrk to be compiled in;
 319   mmap-based options are not currently supported in WIN32.
 320 */
 321 
 322 /* #define WIN32 */
 323 #ifdef WIN32
 324 #define MORECORE wsbrk
 325 #define HAVE_MMAP 0
 326 #endif
 327 
 328 
 329 /*
 330   HAVE_MEMCPY should be defined if you are not otherwise using
 331   ANSI STD C, but still have memcpy and memset in your C library
 332   and want to use them in calloc and realloc. Otherwise simple
 333   macro versions are defined here.
 334 
 335   USE_MEMCPY should be defined as 1 if you actually want to
 336   have memset and memcpy called. People report that the macro
 337   versions are often enough faster than libc versions on many
 338   systems that it is better to use them. 
 339 
 340 */
 341 
 342 #define HAVE_MEMCPY 
 343 
 344 #ifndef USE_MEMCPY
 345 #ifdef HAVE_MEMCPY
 346 #define USE_MEMCPY 1
 347 #else
 348 #define USE_MEMCPY 0
 349 #endif
 350 #endif
 351 
 352 #if (__STD_C || defined(HAVE_MEMCPY)) 
 353 
 354 #if __STD_C
 355 void* memset(void*, int, size_t);
 356 void* memcpy(void*, const void*, size_t);
 357 #else
 358 Void_t* memset();
 359 Void_t* memcpy();
 360 #endif
 361 #endif
 362 
 363 #if USE_MEMCPY
 364 
 365 /* The following macros are only invoked with (2n+1)-multiples of
 366    INTERNAL_SIZE_T units, with a positive integer n. This is exploited
 367    for fast inline execution when n is small. */
 368 
 369 #define MALLOC_ZERO(charp, nbytes)                                            \
 370 do {                                                                          \
 371   INTERNAL_SIZE_T mzsz = (nbytes);                                            \
 372   if(mzsz <= 9*sizeof(mzsz)) {                                                \
 373     INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
 374     if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
 375                                      *mz++ = 0;                               \
 376       if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
 377                                      *mz++ = 0;                               \
 378         if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
 379                                      *mz++ = 0; }}}                           \
 380                                      *mz++ = 0;                               \
 381                                      *mz++ = 0;                               \
 382                                      *mz   = 0;                               \
 383   } else memset((charp), 0, mzsz);                                            \
 384 } while(0)
 385 
 386 #define MALLOC_COPY(dest,src,nbytes)                                          \
 387 do {                                                                          \
 388   INTERNAL_SIZE_T mcsz = (nbytes);                                            \
 389   if(mcsz <= 9*sizeof(mcsz)) {                                                \
 390     INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
 391     INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
 392     if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
 393                                      *mcdst++ = *mcsrc++;                     \
 394       if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
 395                                      *mcdst++ = *mcsrc++;                     \
 396         if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
 397                                      *mcdst++ = *mcsrc++; }}}                 \
 398                                      *mcdst++ = *mcsrc++;                     \
 399                                      *mcdst++ = *mcsrc++;                     \
 400                                      *mcdst   = *mcsrc  ;                     \
 401   } else memcpy(dest, src, mcsz);                                             \
 402 } while(0)
 403 
 404 #else /* !USE_MEMCPY */
 405 
 406 /* Use Duff's device for good zeroing/copying performance. */
 407 
 408 #define MALLOC_ZERO(charp, nbytes)                                            \
 409 do {                                                                          \
 410   INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
 411   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
 412   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
 413   switch (mctmp) {                                                            \
 414     case 0: for(;;) { *mzp++ = 0;                                             \
 415     case 7:           *mzp++ = 0;                                             \
 416     case 6:           *mzp++ = 0;                                             \
 417     case 5:           *mzp++ = 0;                                             \
 418     case 4:           *mzp++ = 0;                                             \
 419     case 3:           *mzp++ = 0;                                             \
 420     case 2:           *mzp++ = 0;                                             \
 421     case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
 422   }                                                                           \
 423 } while(0)
 424 
 425 #define MALLOC_COPY(dest,src,nbytes)                                          \
 426 do {                                                                          \
 427   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
 428   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
 429   long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
 430   if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
 431   switch (mctmp) {                                                            \
 432     case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
 433     case 7:           *mcdst++ = *mcsrc++;                                    \
 434     case 6:           *mcdst++ = *mcsrc++;                                    \
 435     case 5:           *mcdst++ = *mcsrc++;                                    \
 436     case 4:           *mcdst++ = *mcsrc++;                                    \
 437     case 3:           *mcdst++ = *mcsrc++;                                    \
 438     case 2:           *mcdst++ = *mcsrc++;                                    \
 439     case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
 440   }                                                                           \
 441 } while(0)
 442 
 443 #endif
 444 
 445 
 446 /*
 447   Define HAVE_MMAP to optionally make malloc() use mmap() to
 448   allocate very large blocks.  These will be returned to the
 449   operating system immediately after a free().
 450 */
 451 
 452 #ifndef HAVE_MMAP
 453 #define HAVE_MMAP 1
 454 #endif
 455 
 456 /*
 457   Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
 458   large blocks.  This is currently only possible on Linux with
 459   kernel versions newer than 1.3.77.
 460 */
 461 
 462 #ifndef HAVE_MREMAP
 463 #ifdef INTERNAL_LINUX_C_LIB
 464 #define HAVE_MREMAP 1
 465 #else
 466 #define HAVE_MREMAP 0
 467 #endif
 468 #endif
 469 
 470 #if HAVE_MMAP
 471 
 472 #include <unistd.h>
 473 #include <fcntl.h>
 474 #include <sys/mman.h>
 475 
 476 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
 477 #define MAP_ANONYMOUS MAP_ANON
 478 #endif
 479 
 480 #endif /* HAVE_MMAP */
 481 
 482 /*
 483   Access to system page size. To the extent possible, this malloc
 484   manages memory from the system in page-size units.
 485   
 486   The following mechanics for getpagesize were adapted from 
 487   bsd/gnu getpagesize.h 
 488 */
 489 
 490 #ifndef LACKS_UNISTD_H
 491 #  include <unistd.h>
 492 #endif
 493 
 494 #ifndef malloc_getpagesize
 495 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
 496 #    ifndef _SC_PAGE_SIZE
 497 #      define _SC_PAGE_SIZE _SC_PAGESIZE
 498 #    endif
 499 #  endif
 500 #  ifdef _SC_PAGE_SIZE
 501 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
 502 #  else
 503 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
 504        extern size_t getpagesize();
 505 #      define malloc_getpagesize getpagesize()
 506 #    else
 507 #      include <sys/param.h>
 508 #      ifdef EXEC_PAGESIZE
 509 #        define malloc_getpagesize EXEC_PAGESIZE
 510 #      else
 511 #        ifdef NBPG
 512 #          ifndef CLSIZE
 513 #            define malloc_getpagesize NBPG
 514 #          else
 515 #            define malloc_getpagesize (NBPG * CLSIZE)
 516 #          endif
 517 #        else 
 518 #          ifdef NBPC
 519 #            define malloc_getpagesize NBPC
 520 #          else
 521 #            ifdef PAGESIZE
 522 #              define malloc_getpagesize PAGESIZE
 523 #            else
 524 #              define malloc_getpagesize (4096) /* just guess */
 525 #            endif
 526 #          endif
 527 #        endif 
 528 #      endif
 529 #    endif 
 530 #  endif
 531 #endif
 532 
 533 
 534 
 535 /*
 536 
 537   This version of malloc supports the standard SVID/XPG mallinfo
 538   routine that returns a struct containing the same kind of
 539   information you can get from malloc_stats. It should work on
 540   any SVID/XPG compliant system that has a /usr/include/malloc.h
 541   defining struct mallinfo. (If you'd like to install such a thing
 542   yourself, cut out the preliminary declarations as described above
 543   and below and save them in a malloc.h file. But there's no
 544   compelling reason to bother to do this.)
 545 
 546   The main declaration needed is the mallinfo struct that is returned
 547   (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
 548   bunch of fields, most of which are not even meaningful in this
 549   version of malloc. Some of these fields are instead filled by
 550   mallinfo() with other numbers that might possibly be of interest.
 551 
 552   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
 553   /usr/include/malloc.h file that includes a declaration of struct
 554   mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
 555   version is declared below.  These must be precisely the same for
 556   mallinfo() to work.
 557 
 558 */
 559 
 560 /* #define HAVE_USR_INCLUDE_MALLOC_H */
 561 
 562 #if HAVE_USR_INCLUDE_MALLOC_H
 563 #include "/usr/include/malloc.h"
 564 #else
 565 
 566 /* SVID2/XPG mallinfo structure */
 567 
 568 struct mallinfo {
 569   int arena;    /* total space allocated from system */
 570   int ordblks;  /* number of non-inuse chunks */
 571   int smblks;   /* unused -- always zero */
 572   int hblks;    /* number of mmapped regions */
 573   int hblkhd;   /* total space in mmapped regions */
 574   int usmblks;  /* unused -- always zero */
 575   int fsmblks;  /* unused -- always zero */
 576   int uordblks; /* total allocated space */
 577   int fordblks; /* total non-inuse space */
 578   int keepcost; /* top-most, releasable (via malloc_trim) space */
 579 };      
 580 
 581 /* SVID2/XPG mallopt options */
 582 
 583 #define M_MXFAST  1    /* UNUSED in this malloc */
 584 #define M_NLBLKS  2    /* UNUSED in this malloc */
 585 #define M_GRAIN   3    /* UNUSED in this malloc */
 586 #define M_KEEP    4    /* UNUSED in this malloc */
 587 
 588 #endif
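
/*
  For example, the returned struct can be inspected like this (an
  illustrative sketch only; field meanings are annotated above):

      struct mallinfo mi = mallinfo();
      fprintf(stderr, "arena = %d, in use = %d, free = %d\n",
              mi.arena, mi.uordblks, mi.fordblks);
*/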
 589 
 590 /* mallopt options that actually do something */
 591 
 592 #define M_TRIM_THRESHOLD    -1
 593 #define M_TOP_PAD           -2
 594 #define M_MMAP_THRESHOLD    -3
 595 #define M_MMAP_MAX          -4
 596 
 597 
 598 
 599 #ifndef DEFAULT_TRIM_THRESHOLD
 600 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
 601 #endif
 602 
 603 /*
 604     M_TRIM_THRESHOLD is the maximum amount of unused top-most memory 
 605       to keep before releasing via malloc_trim in free().
 606 
 607       Automatic trimming is mainly useful in long-lived programs.
 608       Because trimming via sbrk can be slow on some systems, and can
 609       sometimes be wasteful (in cases where programs immediately
 610       afterward allocate more large chunks), the value should be high
 611       enough so that your overall system performance would improve by
 612       releasing.  
 613 
 614       The trim threshold and the mmap control parameters (see below)
 615       can be traded off with one another. Trimming and mmapping are
 616       two different ways of releasing unused memory back to the
 617       system. Between these two, it is often possible to keep
 618       system-level demands of a long-lived program down to a bare
 619       minimum. For example, in one test suite of sessions measuring
 620       the XF86 X server on Linux, using a trim threshold of 128K and a
 621       mmap threshold of 192K led to near-minimal long term resource
 622       consumption.  
 623 
 624       If you are using this malloc in a long-lived program, it should
 625       pay to experiment with these values.  As a rough guide, you
 626       might set it to a value close to the average size of a process
 627       (program) running on your system.  Releasing this much memory
 628       would allow such a process to run in memory.  Generally, it's
 629       worth it to tune for trimming rather than memory mapping when a
 630       program undergoes phases where several large chunks are
 631       allocated and released in ways that can reuse each other's
 632       storage, perhaps mixed with phases where there are no such
 633       chunks at all.  And in well-behaved long-lived programs,
 634       controlling release of large blocks via trimming versus mapping
 635       is usually faster.
 636 
 637       However, in most programs, these parameters serve mainly as
 638       protection against the system-level effects of carrying around
 639       massive amounts of unneeded memory. Since frequent calls to
 640       sbrk, mmap, and munmap otherwise degrade performance, the default
 641       parameters are set to relatively high values that serve only as
 642       safeguards.
 643 
 644       The default trim value is high enough to cause trimming only in
 645       fairly extreme (by current memory consumption standards) cases.
 646       It must be greater than page size to have any useful effect.  To
 647       disable trimming completely, you can set it to (unsigned long)(-1).
 648 
 649 
 650 */
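
/*
  For example, a long-lived server might lower the threshold and force
  an immediate trim at a quiet point (an illustrative sketch only):

      mallopt(M_TRIM_THRESHOLD, 64 * 1024);   trim whenever more than 64K
                                              of top memory is unused
      malloc_trim(0);                         release all free top memory now
*/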
 651 
 652 
 653 #ifndef DEFAULT_TOP_PAD
 654 #define DEFAULT_TOP_PAD        (0)
 655 #endif
 656 
 657 /*
 658     M_TOP_PAD is the amount of extra `padding' space to allocate or 
 659       retain whenever sbrk is called. It is used in two ways internally:
 660 
 661       * When sbrk is called to extend the top of the arena to satisfy
 662         a new malloc request, this much padding is added to the sbrk
 663         request.
 664 
 665       * When malloc_trim is called automatically from free(),
 666         it is used as the `pad' argument.
 667 
 668       In both cases, the actual amount of padding is rounded 
 669       so that the end of the arena is always a system page boundary.
 670 
 671       The main reason for using padding is to avoid calling sbrk so
 672       often. Having even a small pad greatly reduces the likelihood
 673       that nearly every malloc request during program start-up (or
 674       after trimming) will invoke sbrk, which needlessly wastes
 675       time. 
 676 
 677       Automatic rounding-up to page-size units is normally sufficient
 678       to avoid measurable overhead, so the default is 0.  However, in
 679       systems where sbrk is relatively slow, it can pay to increase
 680       this value, at the expense of carrying around more memory than 
 681       the program needs.
 682 
 683 */
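
/*
  For example, on a system with a slow sbrk, one might ask for an extra
  256K of padding on every sbrk call (illustrative only):

      mallopt(M_TOP_PAD, 256 * 1024);
*/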
 684 
 685 
 686 #ifndef DEFAULT_MMAP_THRESHOLD
 687 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
 688 #endif
 689 
 690 /*
 691 
 692     M_MMAP_THRESHOLD is the request size threshold for using mmap() 
 693       to service a request. Requests of at least this size that cannot 
 694       be allocated using already-existing space will be serviced via mmap.  
 695       (If enough normal freed space already exists it is used instead.)
 696 
 697       Using mmap segregates relatively large chunks of memory so that
 698       they can be individually obtained and released from the host
 699       system. A request serviced through mmap is never reused by any
 700       other request (at least not directly; the system may just so
 701       happen to remap successive requests to the same locations).
 702 
 703       Segregating space in this way has the benefit that mmapped space
 704       can ALWAYS be individually released back to the system, which
 705       helps keep the system level memory demands of a long-lived
 706       program low. Mapped memory can never become `locked' between
 707       other chunks, as can happen with normally allocated chunks, which
 708       means that even trimming via malloc_trim would not release them.
 709 
 710       However, it has the disadvantages that:
 711 
 712          1. The space cannot be reclaimed, consolidated, and then
 713             used to service later requests, as happens with normal chunks. 
 714          2. It can lead to more wastage because of mmap page alignment
 715             requirements.
 716          3. It causes malloc performance to be more dependent on host
 717             system memory management support routines which may vary in
 718             implementation quality and may impose arbitrary
 719             limitations. Generally, servicing a request via normal
 720             malloc steps is faster than going through a system's mmap.
 721 
 722       Altogether, these considerations should lead you to use mmap
 723       only for relatively large requests.  
 724 
 725 
 726 */
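
/*
  For example, to reserve mmap for only very large requests
  (illustrative only):

      mallopt(M_MMAP_THRESHOLD, 1024 * 1024);
*/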
 727 
 728 
 729 
 730 #ifndef DEFAULT_MMAP_MAX
 731 #if HAVE_MMAP
 732 #define DEFAULT_MMAP_MAX       (64)
 733 #else
 734 #define DEFAULT_MMAP_MAX       (0)
 735 #endif
 736 #endif
 737 
 738 /*
 739     M_MMAP_MAX is the maximum number of requests to simultaneously 
 740       service using mmap. This parameter exists because:
 741 
 742          1. Some systems have a limited number of internal tables for
 743             use by mmap.
 744          2. In most systems, overreliance on mmap can degrade overall
 745             performance.
 746          3. If a program allocates many large regions, it is probably
 747             better off using normal sbrk-based allocation routines that
 748             can reclaim and reallocate normal heap memory. Using a
 749             small value allows transition into this mode after the
 750             first few allocations.
 751 
 752       Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
 753       the default value is 0, and attempts to set it to non-zero values
 754       in mallopt will fail.
 755 */
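
/*
  For example (illustrative only):

      mallopt(M_MMAP_MAX, 0);     never service requests via mmap
      mallopt(M_MMAP_MAX, 8);     allow at most 8 simultaneous mmapped chunks
*/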
 756 
 757 
 758 
 759 
 760 /* 
 761 
 762   Special defines for linux libc
 763 
 764   Except when compiled using these special defines for Linux libc
 765   using weak aliases, this malloc is NOT designed to work in
 766   multithreaded applications.  No semaphores or other concurrency
 767   control are provided to ensure that multiple malloc or free calls
 768       don't run at the same time, which could be disastrous. A single
 769   semaphore could be used across malloc, realloc, and free (which is
 770   essentially the effect of the linux weak alias approach). It would
 771   be hard to obtain finer granularity.
 772 
 773 */
 774 
 775 
 776 #ifdef INTERNAL_LINUX_C_LIB
 777 
 778 #if __STD_C
 779 
 780 Void_t * __default_morecore_init (ptrdiff_t);
 781 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;
 782 
 783 #else
 784 
 785 Void_t * __default_morecore_init ();
 786 Void_t *(*__morecore)() = __default_morecore_init;
 787 
 788 #endif
 789 
 790 #define MORECORE (*__morecore)
 791 #define MORECORE_FAILURE 0
 792 #define MORECORE_CLEARS 1 
 793 
 794 #else /* INTERNAL_LINUX_C_LIB */
 795 /*
 796 #if __STD_C
 797 extern Void_t*     sbrk(ptrdiff_t);
 798 #else
 799 extern Void_t*     sbrk();
 800 #endif
 801 */
 802 #ifndef MORECORE
 803 #define MORECORE sbrk
 804 #endif
 805 
 806 #ifndef MORECORE_FAILURE
 807 #define MORECORE_FAILURE -1
 808 #endif
 809 
 810 #ifndef MORECORE_CLEARS
 811 #define MORECORE_CLEARS 1
 812 #endif
 813 
 814 #endif /* INTERNAL_LINUX_C_LIB */
 815 
 816 #if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)
 817 
 818 #define cALLOc          __libc_calloc
 819 #define fREe            __libc_free
 820 #define mALLOc          __libc_malloc
 821 #define mEMALIGn        __libc_memalign
 822 #define rEALLOc         __libc_realloc
 823 #define vALLOc          __libc_valloc
 824 #define pvALLOc         __libc_pvalloc
 825 #define mALLINFo        __libc_mallinfo
 826 #define mALLOPt         __libc_mallopt
 827 
 828 #pragma weak calloc = __libc_calloc
 829 #pragma weak free = __libc_free
 830 #pragma weak cfree = __libc_free
 831 #pragma weak malloc = __libc_malloc
 832 #pragma weak memalign = __libc_memalign
 833 #pragma weak realloc = __libc_realloc
 834 #pragma weak valloc = __libc_valloc
 835 #pragma weak pvalloc = __libc_pvalloc
 836 #pragma weak mallinfo = __libc_mallinfo
 837 #pragma weak mallopt = __libc_mallopt
 838 
 839 #else
 840 
 841 
 842 #define cALLOc          calloc
 843 #define fREe            free
 844 #define mALLOc          malloc
 845 #define mEMALIGn        memalign
 846 #define rEALLOc         realloc
 847 #define vALLOc          valloc
 848 #define pvALLOc         pvalloc
 849 #define mALLINFo        mallinfo
 850 #define mALLOPt         mallopt
 851 
 852 #endif
 853 
 854 /* Public routines */
 855 
 856 #if __STD_C
 857 
 858 Void_t* mALLOc(size_t);
 859 void    fREe(Void_t*);
 860 Void_t* rEALLOc(Void_t*, size_t);
 861 Void_t* mEMALIGn(size_t, size_t);
 862 Void_t* vALLOc(size_t);
 863 Void_t* pvALLOc(size_t);
 864 Void_t* cALLOc(size_t, size_t);
 865 void    cfree(Void_t*);
 866 int     malloc_trim(size_t);
 867 size_t  malloc_usable_size(Void_t*);
 868 void    malloc_stats();
 869 int     mALLOPt(int, int);
 870 struct mallinfo mALLINFo(void);
 871 #else
 872 Void_t* mALLOc();
 873 void    fREe();
 874 Void_t* rEALLOc();
 875 Void_t* mEMALIGn();
 876 Void_t* vALLOc();
 877 Void_t* pvALLOc();
 878 Void_t* cALLOc();
 879 void    cfree();
 880 int     malloc_trim();
 881 size_t  malloc_usable_size();
 882 void    malloc_stats();
 883 int     mALLOPt();
 884 struct mallinfo mALLINFo();
 885 #endif
 886 
 887 
 888 #ifdef __cplusplus
 889 };  /* end of extern "C" */
 890 #endif
 891 
 892 /* ---------- To make a malloc.h, end cutting here ------------ */
