src/stdlib/SDL_malloc.c
author Sam Lantinga
Tue, 21 Feb 2006 08:46:50 +0000
changeset 1402 d910939febfa
parent 1354 22f39393668a
child 1433 bb6839704ed6
permissions -rw-r--r--
Use consistent identifiers for the various platforms we support.
Make sure every source file includes SDL_config.h, so the proper system
headers are chosen.
     1 /*
     2     SDL - Simple DirectMedia Layer
     3     Copyright (C) 1997-2006 Sam Lantinga
     4 
     5     This library is free software; you can redistribute it and/or
     6     modify it under the terms of the GNU Lesser General Public
     7     License as published by the Free Software Foundation; either
     8     version 2.1 of the License, or (at your option) any later version.
     9 
    10     This library is distributed in the hope that it will be useful,
    11     but WITHOUT ANY WARRANTY; without even the implied warranty of
    12     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    13     Lesser General Public License for more details.
    14 
    15     You should have received a copy of the GNU Lesser General Public
    16     License along with this library; if not, write to the Free Software
    17     Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    18 
    19     Sam Lantinga
    20     slouken@libsdl.org
    21 */
    22 #include "SDL_config.h"
    23 
    24 /* This file contains portable memory management functions for SDL */
    25 
    26 #include "SDL_stdinc.h"
    27 
    28 #ifndef HAVE_MALLOC
    29 
    30 #define LACKS_STDIO_H
    31 #define LACKS_STRINGS_H
    32 #define LACKS_STRING_H
    33 #define LACKS_STDLIB_H
    34 #define ABORT
    35 #define memset	SDL_memset
    36 #define memcpy	SDL_memcpy
    37 #define malloc	SDL_malloc
    38 #define calloc	SDL_calloc
    39 #define realloc	SDL_realloc
    40 #define free	SDL_free
    41 
    42 /*
    43   This is a version (aka dlmalloc) of malloc/free/realloc written by
    44   Doug Lea and released to the public domain, as explained at
    45   http://creativecommons.org/licenses/publicdomain.  Send questions,
    46   comments, complaints, performance data, etc to dl@cs.oswego.edu
    47 
    48 * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
    49 
    50    Note: There may be an updated version of this malloc obtainable at
    51            ftp://gee.cs.oswego.edu/pub/misc/malloc.c
    52          Check before installing!
    53 
    54 * Quickstart
    55 
    56   This library is all in one file to simplify the most common usage:
    57   ftp it, compile it (-O3), and link it into another program. All of
    58   the compile-time options default to reasonable values for use on
    59   most platforms.  You might later want to step through various
    60   compile-time and dynamic tuning options.
    61 
    62   For convenience, an include file for code using this malloc is at:
    63      ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
    64   You don't really need this .h file unless you call functions not
    65   defined in your system include files.  The .h file contains only the
    66   excerpts from this file needed for using this malloc on ANSI C/C++
    67   systems, so long as you haven't changed compile-time options about
    68   naming and tuning parameters.  If you do, then you can create your
    69   own malloc.h that does include all settings by cutting at the point
    70   indicated below. Note that you may already by default be using a C
    71   library containing a malloc that is based on some version of this
    72   malloc (for example in linux). You might still want to use the one
    73   in this file to customize settings or to avoid overheads associated
    74   with library versions.
    75 
    76 * Vital statistics:
    77 
    78   Supported pointer/size_t representation:       4 or 8 bytes
    79        size_t MUST be an unsigned type of the same width as
    80        pointers. (If you are using an ancient system that declares
    81        size_t as a signed type, or need it to be a different width
    82        than pointers, you can use a previous release of this malloc
    83        (e.g. 2.7.2) supporting these.)
    84 
    85   Alignment:                                     8 bytes (default)
    86        This suffices for nearly all current machines and C compilers.
    87        However, you can define MALLOC_ALIGNMENT to be wider than this
    88        if necessary (up to 128bytes), at the expense of using more space.
    89 
    90   Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
    91                                           8 or 16 bytes (if 8byte sizes)
    92        Each malloced chunk has a hidden word of overhead holding size
    93        and status information, and an additional cross-check word
    94        if FOOTERS is defined.
    95 
    96   Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
    97                           8-byte ptrs:  32 bytes    (including overhead)
    98 
    99        Even a request for zero bytes (i.e., malloc(0)) returns a
   100        pointer to something of the minimum allocatable size.
   101        The maximum overhead wastage (i.e., number of extra bytes
   102        allocated beyond what was requested in malloc) is less than or equal
   103        to the minimum size, except for requests >= mmap_threshold that
   104        are serviced via mmap(), where the worst case wastage is about
   105        32 bytes plus the remainder from a system page (the minimal
   106        mmap unit); typically 4096 or 8192 bytes.
   107 
   108   Security: static-safe; optionally more or less
   109        The "security" of malloc refers to the ability of malicious
   110        code to accentuate the effects of errors (for example, freeing
   111        space that is not currently malloc'ed or overwriting past the
   112        ends of chunks) in code that calls malloc.  This malloc
   113        guarantees not to modify any memory locations below the base of
   114        heap, i.e., static variables, even in the presence of usage
   115        errors.  The routines additionally detect most improper frees
   116        and reallocs.  All this holds as long as the static bookkeeping
   117        for malloc itself is not corrupted by some other means.  This
   118        is only one aspect of security -- these checks do not, and
   119        cannot, detect all possible programming errors.
   120 
   121        If FOOTERS is defined nonzero, then each allocated chunk
   122        carries an additional check word to verify that it was malloced
   123        from its space.  These check words are the same within each
   124        execution of a program using malloc, but differ across
   125        executions, so externally crafted fake chunks cannot be
   126        freed. This improves security by rejecting frees/reallocs that
   127        could corrupt heap memory, in addition to the checks preventing
   128        writes to statics that are always on.  This may further improve
   129        security at the expense of time and space overhead.  (Note that
   130        FOOTERS may also be worth using with MSPACES.)
   131 
   132        By default detected errors cause the program to abort (calling
   133        "abort()"). You can override this to instead proceed past
   134        errors by defining PROCEED_ON_ERROR.  In this case, a bad free
   135        has no effect, and a malloc that encounters a bad address
   136        caused by user overwrites will ignore the bad address by
   137        dropping pointers and indices to all known memory. This may
   138        be appropriate for programs that should continue if at all
   139        possible in the face of programming errors, although they may
   140        run out of memory because dropped memory is never reclaimed.
   141 
   142        If you don't like either of these options, you can define
   143        CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
   144   else. And if you are sure that your program using malloc has
   145        no errors or vulnerabilities, you can define INSECURE to 1,
   146        which might (or might not) provide a small performance improvement.
   147 
   148   Thread-safety: NOT thread-safe unless USE_LOCKS defined
   149        When USE_LOCKS is defined, each public call to malloc, free,
   150        etc is surrounded with either a pthread mutex or a win32
   151        spinlock (depending on WIN32). This is not especially fast, and
   152        can be a major bottleneck.  It is designed only to provide
   153        minimal protection in concurrent environments, and to provide a
   154        basis for extensions.  If you are using malloc in a concurrent
   155        program, consider instead using ptmalloc, which is derived from
   156        a version of this malloc. (See http://www.malloc.de).
   157 
   158   System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
   159        This malloc can use unix sbrk or any emulation (invoked using
   160        the CALL_MORECORE macro) and/or mmap/munmap or any emulation
   161        (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
   162        memory.  On most unix systems, it tends to work best if both
   163        MORECORE and MMAP are enabled.  On Win32, it uses emulations
   164        based on VirtualAlloc. It also uses common C library functions
   165        like memset.
   166 
   167   Compliance: I believe it is compliant with the Single Unix Specification
   168        (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
   169        others as well.
   170 
   171 * Overview of algorithms
   172 
   173   This is not the fastest, most space-conserving, most portable, or
   174   most tunable malloc ever written. However it is among the fastest
   175   while also being among the most space-conserving, portable and
   176   tunable.  Consistent balance across these factors results in a good
   177   general-purpose allocator for malloc-intensive programs.
   178 
   179   In most ways, this malloc is a best-fit allocator. Generally, it
   180   chooses the best-fitting existing chunk for a request, with ties
   181   broken in approximately least-recently-used order. (This strategy
   182   normally maintains low fragmentation.) However, for requests less
   183   than 256bytes, it deviates from best-fit when there is not an
   184   exactly fitting available chunk by preferring to use space adjacent
   185   to that used for the previous small request, as well as by breaking
   186   ties in approximately most-recently-used order. (These enhance
   187   locality of series of small allocations.)  And for very large requests
   188   (>= 256Kb by default), it relies on system memory mapping
   189   facilities, if supported.  (This helps avoid carrying around and
   190   possibly fragmenting memory used only for large chunks.)
   191 
   192   All operations (except malloc_stats and mallinfo) have execution
   193   times that are bounded by a constant factor of the number of bits in
   194   a size_t, not counting any clearing in calloc or copying in realloc,
   195   or actions surrounding MORECORE and MMAP that have times
   196   proportional to the number of non-contiguous regions returned by
   197   system allocation routines, which is often just 1.
   198 
   199   The implementation is not very modular and seriously overuses
   200   macros. Perhaps someday all C compilers will do as good a job
   201   inlining modular code as can now be done by brute-force expansion,
   202   but now, enough of them seem not to.
   203 
   204   Some compilers issue a lot of warnings about code that is
   205   dead/unreachable only on some platforms, and also about intentional
   206   uses of negation on unsigned types. All known cases of each can be
   207   ignored.
   208 
   209   For a longer but out of date high-level description, see
   210      http://gee.cs.oswego.edu/dl/html/malloc.html
   211 
   212 * MSPACES
   213   If MSPACES is defined, then in addition to malloc, free, etc.,
   214   this file also defines mspace_malloc, mspace_free, etc. These
   215   are versions of malloc routines that take an "mspace" argument
   216   obtained using create_mspace, to control all internal bookkeeping.
   217   If ONLY_MSPACES is defined, only these versions are compiled.
   218   So if you would like to use this allocator for only some allocations,
   219   and your system malloc for others, you can compile with
   220   ONLY_MSPACES and then do something like...
   221     static mspace mymspace = create_mspace(0,0); // for example
   222     #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
   223 
   224   (Note: If you only need one instance of an mspace, you can instead
   225   use "USE_DL_PREFIX" to relabel the global malloc.)
   226 
   227   You can similarly create thread-local allocators by storing
   228   mspaces as thread-locals. For example:
   229     static __thread mspace tlms = 0;
   230     void*  tlmalloc(size_t bytes) {
   231       if (tlms == 0) tlms = create_mspace(0, 0);
   232       return mspace_malloc(tlms, bytes);
   233     }
   234     void  tlfree(void* mem) { mspace_free(tlms, mem); }
   235 
   236   Unless FOOTERS is defined, each mspace is completely independent.
   237   You cannot allocate from one and free to another (although
   238   conformance is only weakly checked, so usage errors are not always
   239   caught). If FOOTERS is defined, then each chunk carries around a tag
   240   indicating its originating mspace, and frees are directed to their
   241   originating spaces.
   242 
   243  -------------------------  Compile-time options ---------------------------
   244 
   245 Be careful in setting #define values for numerical constants of type
   246 size_t. On some systems, literal values are not automatically extended
   247 to size_t precision unless they are explicitly casted.
   248 
   249 WIN32                    default: defined if _WIN32 defined
   250   Defining WIN32 sets up defaults for MS environment and compilers.
   251   Otherwise defaults are for unix.
   252 
   253 MALLOC_ALIGNMENT         default: (size_t)8
   254   Controls the minimum alignment for malloc'ed chunks.  It must be a
   255   power of two and at least 8, even on machines for which smaller
   256   alignments would suffice. It may be defined as larger than this
   257   though. Note however that code and data structures are optimized for
   258   the case of 8-byte alignment.
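  As an illustration (an editorial addition, not part of the original
  notes): a build that needs 16-byte alignment, for example to hold SSE
  vector types directly in malloc'ed blocks, could use
      #define MALLOC_ALIGNMENT ((size_t)16U)
  before compiling this file.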
   259 
   260 MSPACES                  default: 0 (false)
   261   If true, compile in support for independent allocation spaces.
   262   This is only supported if HAVE_MMAP is true.
   263 
   264 ONLY_MSPACES             default: 0 (false)
   265   If true, only compile in mspace versions, not regular versions.
   266 
   267 USE_LOCKS                default: 0 (false)
   268   Causes each call to each public routine to be surrounded with
   269   pthread or WIN32 mutex lock/unlock. (If set true, this can be
   270   overridden on a per-mspace basis for mspace versions.)
   271 
   272 FOOTERS                  default: 0
   273   If true, provide extra checking and dispatching by placing
   274   information in the footers of allocated chunks. This adds
   275   space and time overhead.
   276 
   277 INSECURE                 default: 0
   278   If true, omit checks for usage errors and heap space overwrites.
   279 
   280 USE_DL_PREFIX            default: NOT defined
   281   Causes compiler to prefix all public routines with the string 'dl'.
   282   This can be useful when you only want to use this malloc in one part
   283   of a program, using your regular system malloc elsewhere.
   284 
   285 ABORT                    default: defined as abort()
   286   Defines how to abort on failed checks.  On most systems, a failed
   287   check cannot die with an "assert" or even print an informative
   288   message, because the underlying print routines in turn call malloc,
   289   which will fail again.  Generally, the best policy is to simply call
   290   abort(). It's not very useful to do more than this because many
   291   errors due to overwriting will show up as address faults (null, odd
   292   addresses etc) rather than malloc-triggered checks, so will also
   293   abort.  Also, most compilers know that abort() does not return, so
   294   can better optimize code conditionally calling it.
   295 
   296 PROCEED_ON_ERROR           default: defined as 0 (false)
   297   Controls whether detected bad addresses cause them to be bypassed
   298   rather than aborting. If set, detected bad arguments to free and
   299   realloc are ignored. And all bookkeeping information is zeroed out
   300   upon a detected overwrite of freed heap space, thus losing the
   301   ability to ever return it from malloc again, but enabling the
   302   application to proceed. If PROCEED_ON_ERROR is defined, the
   303   static variable malloc_corruption_error_count is compiled in
   304   and can be examined to see if errors have occurred. This option
   305   generates slower code than the default abort policy.
   306 
   307 DEBUG                    default: NOT defined
   308   The DEBUG setting is mainly intended for people trying to modify
   309   this code or diagnose problems when porting to new platforms.
   310   However, it may also be able to better isolate user errors than just
   311   using runtime checks.  The assertions in the check routines spell
   312   out in more detail the assumptions and invariants underlying the
   313   algorithms.  The checking is fairly extensive, and will slow down
   314   execution noticeably. Calling malloc_stats or mallinfo with DEBUG
   315   set will attempt to check every non-mmapped allocated and free chunk
   316   in the course of computing the summaries.
   317 
   318 ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
   319   Debugging assertion failures can be nearly impossible if your
   320   version of the assert macro causes malloc to be called, which will
   321   lead to a cascade of further failures, blowing the runtime stack.
   322   ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
   323   which will usually make debugging easier.
   324 
   325 MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
   326   The action to take before "return 0" when malloc fails to return
   327   memory because none is available.
   328 
   329 HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
   330   True if this system supports sbrk or an emulation of it.
   331 
   332 MORECORE                  default: sbrk
   333   The name of the sbrk-style system routine to call to obtain more
   334   memory.  See below for guidance on writing custom MORECORE
   335   functions. The type of the argument to sbrk/MORECORE varies across
   336   systems.  It cannot be size_t, because it supports negative
   337   arguments, so it is normally the signed type of the same width as
   338   size_t (sometimes declared as "intptr_t").  It doesn't much matter
   339   though. Internally, we only call it with arguments less than half
   340   the max value of a size_t, which should work across all reasonable
   341   possibilities, although sometimes generating compiler warnings.  See
   342   near the end of this file for guidelines for creating a custom
   343   version of MORECORE.
   344 
   345 MORECORE_CONTIGUOUS       default: 1 (true)
   346   If true, take advantage of fact that consecutive calls to MORECORE
   347   with positive arguments always return contiguous increasing
   348   addresses.  This is true of unix sbrk. It does not hurt too much to
   349   set it true anyway, since malloc copes with non-contiguities.
   350   Setting it false when memory is definitely non-contiguous, though, saves
   351   the time and possibly wasted space it would otherwise take to discover this.
   352 
   353 MORECORE_CANNOT_TRIM      default: NOT defined
   354   True if MORECORE cannot release space back to the system when given
   355   negative arguments. This is generally necessary only if you are
   356   using a hand-crafted MORECORE function that cannot handle negative
   357   arguments.
   358 
   359 HAVE_MMAP                 default: 1 (true)
   360   True if this system supports mmap or an emulation of it.  If so, and
   361   HAVE_MORECORE is not true, MMAP is used for all system
   362   allocation. If set and HAVE_MORECORE is true as well, MMAP is
   363   primarily used to directly allocate very large blocks. It is also
   364   used as a backup strategy in cases where MORECORE fails to provide
   365   space from system. Note: A single call to MUNMAP is assumed to be
   366   able to unmap memory that may have been allocated using multiple calls
   367   to MMAP, so long as they are adjacent.
   368 
   369 HAVE_MREMAP               default: 1 on linux, else 0
   370   If true realloc() uses mremap() to re-allocate large blocks and
   371   extend or shrink allocation spaces.
   372 
   373 MMAP_CLEARS               default: 1 on unix
   374   True if mmap clears memory so calloc doesn't need to. This is true
   375   for standard unix mmap using /dev/zero.
   376 
   377 USE_BUILTIN_FFS            default: 0 (i.e., not used)
   378   Causes malloc to use the builtin ffs() function to compute indices.
   379   Some compilers may recognize and intrinsify ffs to be faster than the
   380   supplied C version. Also, the case of x86 using gcc is special-cased
   381   to an asm instruction, so is already as fast as it can be, and so
   382   this setting has no effect. (On most x86s, the asm version is only
   383   slightly faster than the C version.)
   384 
   385 malloc_getpagesize         default: derive from system includes, or 4096.
   386   The system page size. To the extent possible, this malloc manages
   387   memory from the system in page-size units.  This may be (and
   388   usually is) a function rather than a constant. This is ignored
   389   if WIN32, where page size is determined using GetSystemInfo during
   390   initialization.
   391 
   392 USE_DEV_RANDOM             default: 0 (i.e., not used)
   393   Causes malloc to use /dev/random to initialize secure magic seed for
   394   stamping footers. Otherwise, the current time is used.
   395 
   396 NO_MALLINFO                default: 0
   397   If defined, don't compile "mallinfo". This can be a simple way
   398   of dealing with mismatches between system declarations and
   399   those in this file.
   400 
   401 MALLINFO_FIELD_TYPE        default: size_t
   402   The type of the fields in the mallinfo struct. This was originally
   403   defined as "int" in SVID etc, but is more usefully defined as
   404   size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
   405 
   406 REALLOC_ZERO_BYTES_FREES    default: not defined
   407   This should be set if a call to realloc with zero bytes should 
   408   be the same as a call to free. Some people think it should. Otherwise,
   409   since this malloc returns a unique pointer for malloc(0),
   410   realloc(p, 0) does as well.
   411 
   412 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
   413 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
   414 LACKS_STDLIB_H                default: NOT defined unless on WIN32
   415   Define these if your system does not have these header files.
   416   You might need to manually insert some of the declarations they provide.
   417 
   418 DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
   419                                 system_info.dwAllocationGranularity in WIN32,
   420                                 otherwise 64K.
   421       Also settable using mallopt(M_GRANULARITY, x)
   422   The unit for allocating and deallocating memory from the system.  On
   423   most systems with contiguous MORECORE, there is no reason to
   424   make this more than a page. However, systems with MMAP tend to
   425   either require or encourage larger granularities.  You can increase
   426   this value to prevent system allocation functions from being called so
   427   often, especially if they are slow.  The value must be at least one
   428   page and must be a power of two.  Setting to 0 causes initialization
   429   to either page size or win32 region size.  (Note: In previous
   430   versions of malloc, the equivalent of this option was called
   431   "TOP_PAD")
   432 
   433 DEFAULT_TRIM_THRESHOLD    default: 2MB
   434       Also settable using mallopt(M_TRIM_THRESHOLD, x)
   435   The maximum amount of unused top-most memory to keep before
   436   releasing via malloc_trim in free().  Automatic trimming is mainly
   437   useful in long-lived programs using contiguous MORECORE.  Because
   438   trimming via sbrk can be slow on some systems, and can sometimes be
   439   wasteful (in cases where programs immediately afterward allocate
   440   more large chunks) the value should be high enough so that your
   441   overall system performance would improve by releasing this much
   442   memory.  As a rough guide, you might set to a value close to the
   443   average size of a process (program) running on your system.
   444   Releasing this much memory would allow such a process to run in
   445   memory.  Generally, it is worth tuning trim thresholds when a
   446   program undergoes phases where several large chunks are allocated
   447   and released in ways that can reuse each other's storage, perhaps
   448   mixed with phases where there are no such chunks at all. The trim
   449   value must be greater than page size to have any useful effect.  To
   450   disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
   451   some people use of mallocing a huge space and then freeing it at
   452   program startup, in an attempt to reserve system memory, doesn't
   453   have the intended effect under automatic trimming, since that memory
   454   will immediately be returned to the system.
   455 
   456 DEFAULT_MMAP_THRESHOLD       default: 256K
   457       Also settable using mallopt(M_MMAP_THRESHOLD, x)
   458   The request size threshold for using MMAP to directly service a
   459   request. Requests of at least this size that cannot be allocated
   460   using already-existing space will be serviced via mmap.  (If enough
   461   normal freed space already exists it is used instead.)  Using mmap
   462   segregates relatively large chunks of memory so that they can be
   463   individually obtained and released from the host system. A request
   464   serviced through mmap is never reused by any other request (at least
   465   not directly; the system may just so happen to remap successive
   466   requests to the same locations).  Segregating space in this way has
   467   the benefits that: Mmapped space can always be individually released
   468   back to the system, which helps keep the system level memory demands
   469   of a long-lived program low.  Also, mapped memory doesn't become
   470   `locked' between other chunks, as can happen with normally allocated
   471   chunks, which means that even trimming via malloc_trim would not
   472   release them.  However, it has the disadvantage that the space
   473   cannot be reclaimed, consolidated, and then used to service later
   474   requests, as happens with normal chunks.  The advantages of mmap
   475   nearly always outweigh disadvantages for "large" chunks, but the
   476   value of "large" may vary across systems.  The default is an
   477   empirically derived value that works well in most systems. You can
   478   disable mmap by setting to MAX_SIZE_T.
   479 
   480 */
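
/*
  Illustrative configuration sketch (an editorial addition, not part of
  the original dlmalloc notes): a build that wants locking, footer
  checks, and a larger direct-mmap threshold could define the options
  documented above before this point, for example:

    #define USE_LOCKS 1
    #define FOOTERS 1
    #define DEFAULT_MMAP_THRESHOLD ((size_t)512U * (size_t)1024U)
*/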
   481 
   482 #ifndef WIN32
   483 #ifdef _WIN32
   484 #define WIN32 1
   485 #endif  /* _WIN32 */
   486 #endif  /* WIN32 */
   487 #ifdef WIN32
   488 #include "SDL_windows.h"
   489 #define HAVE_MMAP 1
   490 #define HAVE_MORECORE 0
   491 #define LACKS_UNISTD_H
   492 #define LACKS_SYS_PARAM_H
   493 #define LACKS_SYS_MMAN_H
   494 #define LACKS_STRING_H
   495 #define LACKS_STRINGS_H
   496 #define LACKS_SYS_TYPES_H
   497 #define LACKS_ERRNO_H
   498 #define MALLOC_FAILURE_ACTION
   499 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
   500 #endif  /* WIN32 */
   501 
   502 #if defined(DARWIN) || defined(_DARWIN)
   503 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
   504 #ifndef HAVE_MORECORE
   505 #define HAVE_MORECORE 0
   506 #define HAVE_MMAP 1
   507 #endif  /* HAVE_MORECORE */
   508 #endif  /* DARWIN */
   509 
   510 #ifndef LACKS_SYS_TYPES_H
   511 #include <sys/types.h>  /* For size_t */
   512 #endif  /* LACKS_SYS_TYPES_H */
   513 
   514 /* The maximum possible size_t value has all bits set */
   515 #define MAX_SIZE_T           (~(size_t)0)
   516 
   517 #ifndef ONLY_MSPACES
   518 #define ONLY_MSPACES 0
   519 #endif  /* ONLY_MSPACES */
   520 #ifndef MSPACES
   521 #if ONLY_MSPACES
   522 #define MSPACES 1
   523 #else   /* ONLY_MSPACES */
   524 #define MSPACES 0
   525 #endif  /* ONLY_MSPACES */
   526 #endif  /* MSPACES */
   527 #ifndef MALLOC_ALIGNMENT
   528 #define MALLOC_ALIGNMENT ((size_t)8U)
   529 #endif  /* MALLOC_ALIGNMENT */
   530 #ifndef FOOTERS
   531 #define FOOTERS 0
   532 #endif  /* FOOTERS */
   533 #ifndef ABORT
   534 #define ABORT  abort()
   535 #endif  /* ABORT */
   536 #ifndef ABORT_ON_ASSERT_FAILURE
   537 #define ABORT_ON_ASSERT_FAILURE 1
   538 #endif  /* ABORT_ON_ASSERT_FAILURE */
   539 #ifndef PROCEED_ON_ERROR
   540 #define PROCEED_ON_ERROR 0
   541 #endif  /* PROCEED_ON_ERROR */
   542 #ifndef USE_LOCKS
   543 #define USE_LOCKS 0
   544 #endif  /* USE_LOCKS */
   545 #ifndef INSECURE
   546 #define INSECURE 0
   547 #endif  /* INSECURE */
   548 #ifndef HAVE_MMAP
   549 #define HAVE_MMAP 1
   550 #endif  /* HAVE_MMAP */
   551 #ifndef MMAP_CLEARS
   552 #define MMAP_CLEARS 1
   553 #endif  /* MMAP_CLEARS */
   554 #ifndef HAVE_MREMAP
   555 #ifdef linux
   556 #define HAVE_MREMAP 1
   557 #else   /* linux */
   558 #define HAVE_MREMAP 0
   559 #endif  /* linux */
   560 #endif  /* HAVE_MREMAP */
   561 #ifndef MALLOC_FAILURE_ACTION
   562 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
   563 #endif  /* MALLOC_FAILURE_ACTION */
   564 #ifndef HAVE_MORECORE
   565 #if ONLY_MSPACES
   566 #define HAVE_MORECORE 0
   567 #else   /* ONLY_MSPACES */
   568 #define HAVE_MORECORE 1
   569 #endif  /* ONLY_MSPACES */
   570 #endif  /* HAVE_MORECORE */
   571 #if !HAVE_MORECORE
   572 #define MORECORE_CONTIGUOUS 0
   573 #else   /* !HAVE_MORECORE */
   574 #ifndef MORECORE
   575 #define MORECORE sbrk
   576 #endif  /* MORECORE */
   577 #ifndef MORECORE_CONTIGUOUS
   578 #define MORECORE_CONTIGUOUS 1
   579 #endif  /* MORECORE_CONTIGUOUS */
   580 #endif  /* HAVE_MORECORE */
   581 #ifndef DEFAULT_GRANULARITY
   582 #if MORECORE_CONTIGUOUS
   583 #define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
   584 #else   /* MORECORE_CONTIGUOUS */
   585 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
   586 #endif  /* MORECORE_CONTIGUOUS */
   587 #endif  /* DEFAULT_GRANULARITY */
   588 #ifndef DEFAULT_TRIM_THRESHOLD
   589 #ifndef MORECORE_CANNOT_TRIM
   590 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
   591 #else   /* MORECORE_CANNOT_TRIM */
   592 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
   593 #endif  /* MORECORE_CANNOT_TRIM */
   594 #endif  /* DEFAULT_TRIM_THRESHOLD */
   595 #ifndef DEFAULT_MMAP_THRESHOLD
   596 #if HAVE_MMAP
   597 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
   598 #else   /* HAVE_MMAP */
   599 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
   600 #endif  /* HAVE_MMAP */
   601 #endif  /* DEFAULT_MMAP_THRESHOLD */
   602 #ifndef USE_BUILTIN_FFS
   603 #define USE_BUILTIN_FFS 0
   604 #endif  /* USE_BUILTIN_FFS */
   605 #ifndef USE_DEV_RANDOM
   606 #define USE_DEV_RANDOM 0
   607 #endif  /* USE_DEV_RANDOM */
   608 #ifndef NO_MALLINFO
   609 #define NO_MALLINFO 0
   610 #endif  /* NO_MALLINFO */
   611 #ifndef MALLINFO_FIELD_TYPE
   612 #define MALLINFO_FIELD_TYPE size_t
   613 #endif  /* MALLINFO_FIELD_TYPE */
   614 
   615 /*
   616   mallopt tuning options.  SVID/XPG defines four standard parameter
   617   numbers for mallopt, normally defined in malloc.h.  None of these
   618   are used in this malloc, so setting them has no effect. But this
   619   malloc does support the following options.
   620 */
   621 
   622 #define M_TRIM_THRESHOLD     (-1)
   623 #define M_GRANULARITY        (-2)
   624 #define M_MMAP_THRESHOLD     (-3)
   625 
   626 /* ------------------------ Mallinfo declarations ------------------------ */
   627 
   628 #if !NO_MALLINFO
   629 /*
   630   This version of malloc supports the standard SVID/XPG mallinfo
   631   routine that returns a struct containing usage properties and
   632   statistics. It should work on any system that has a
   633   /usr/include/malloc.h defining struct mallinfo.  The main
   634   declaration needed is the mallinfo struct that is returned (by-copy)
   635   by mallinfo().  The mallinfo struct contains a bunch of fields that
   636   are not even meaningful in this version of malloc.  These fields
   637   are instead filled by mallinfo() with other numbers that might be of
   638   interest.
   639 
   640   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
   641   /usr/include/malloc.h file that includes a declaration of struct
   642   mallinfo.  If so, it is included; else a compliant version is
   643   declared below.  These must be precisely the same for mallinfo() to
   644   work.  The original SVID version of this struct, defined on most
   645   systems with mallinfo, declares all fields as ints. But some others
   646   define them as unsigned long. If your system defines the fields using a
   647   type of different width than listed here, you MUST #include your
   648   system version and #define HAVE_USR_INCLUDE_MALLOC_H.
   649 */
   650 
   651 /* #define HAVE_USR_INCLUDE_MALLOC_H */
   652 
   653 #ifdef HAVE_USR_INCLUDE_MALLOC_H
   654 #include "/usr/include/malloc.h"
   655 #else /* HAVE_USR_INCLUDE_MALLOC_H */
   656 
   657 struct mallinfo {
   658   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
   659   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
   660   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
   661   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
   662   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
   663   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
   664   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
   665   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
   666   MALLINFO_FIELD_TYPE fordblks; /* total free space */
   667   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
   668 };
   669 
   670 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
   671 #endif /* NO_MALLINFO */
   672 
   673 #ifdef __cplusplus
   674 extern "C" {
   675 #endif /* __cplusplus */
   676 
   677 #if !ONLY_MSPACES
   678 
   679 /* ------------------- Declarations of public routines ------------------- */
   680 
   681 #ifndef USE_DL_PREFIX
   682 #define dlcalloc               calloc
   683 #define dlfree                 free
   684 #define dlmalloc               malloc
   685 #define dlmemalign             memalign
   686 #define dlrealloc              realloc
   687 #define dlvalloc               valloc
   688 #define dlpvalloc              pvalloc
   689 #define dlmallinfo             mallinfo
   690 #define dlmallopt              mallopt
   691 #define dlmalloc_trim          malloc_trim
   692 #define dlmalloc_stats         malloc_stats
   693 #define dlmalloc_usable_size   malloc_usable_size
   694 #define dlmalloc_footprint     malloc_footprint
   695 #define dlmalloc_max_footprint malloc_max_footprint
   696 #define dlindependent_calloc   independent_calloc
   697 #define dlindependent_comalloc independent_comalloc
   698 #endif /* USE_DL_PREFIX */
   699 
   700 
   701 /*
   702   malloc(size_t n)
   703   Returns a pointer to a newly allocated chunk of at least n bytes, or
   704   null if no space is available, in which case errno is set to ENOMEM
   705   on ANSI C systems.
   706 
   707   If n is zero, malloc returns a minimum-sized chunk. (The minimum
   708   size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
   709   systems.)  Note that size_t is an unsigned type, so calls with
   710   arguments that would be negative if signed are interpreted as
   711   requests for huge amounts of space, which will often fail. The
   712   maximum supported value of n differs across systems, but is in all
   713   cases less than the maximum representable value of a size_t.
   714 */
   715 void* dlmalloc(size_t);
   716 
   717 /*
   718   free(void* p)
   719   Releases the chunk of memory pointed to by p, that had been previously
   720   allocated using malloc or a related routine such as realloc.
   721   It has no effect if p is null. If p was not malloced or already
   722   freed, free(p) will by default cause the current program to abort.
   723 */
   724 void  dlfree(void*);
   725 
   726 /*
   727   calloc(size_t n_elements, size_t element_size);
   728   Returns a pointer to n_elements * element_size bytes, with all locations
   729   set to zero.
   730 */
   731 void* dlcalloc(size_t, size_t);
   732 
   733 /*
   734   realloc(void* p, size_t n)
   735   Returns a pointer to a chunk of size n that contains the same data
   736   as does chunk p up to the minimum of (n, p's size) bytes, or null
   737   if no space is available.
   738 
   739   The returned pointer may or may not be the same as p. The algorithm
   740   prefers extending p in most cases when possible, otherwise it
   741   employs the equivalent of a malloc-copy-free sequence.
   742 
   743   If p is null, realloc is equivalent to malloc.
   744 
   745   If space is not available, realloc returns null, errno is set (if on
   746   ANSI) and p is NOT freed.
   747 
   748   If n is for fewer bytes than already held by p, the newly unused
   749   space is lopped off and freed if possible.  realloc with a size
   750   argument of zero (re)allocates a minimum-sized chunk.
   751 
   752   The old unix realloc convention of allowing the last-free'd chunk
   753   to be used as an argument to realloc is not supported.
   754 */
   755 
   756 void* dlrealloc(void*, size_t);
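
/*
  Usage sketch (an editorial addition, not part of the original
  comment): because realloc may return a pointer different from p, and
  returns null on failure without freeing p, a safe growth idiom is:

    void* tmp = realloc(p, newsize);
    if (tmp != 0)
      p = tmp;            // old contents were carried over
    else
      handle_failure();   // hypothetical handler; p is still valid
*/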
   757 
   758 /*
   759   memalign(size_t alignment, size_t n);
   760   Returns a pointer to a newly allocated chunk of n bytes, aligned
   761   in accord with the alignment argument.
   762 
   763   The alignment argument should be a power of two. If the argument is
   764   not a power of two, the nearest greater power is used.
   765   8-byte alignment is guaranteed by normal malloc calls, so don't
   766   bother calling memalign with an argument of 8 or less.
   767 
   768   Overreliance on memalign is a sure way to fragment space.
   769 */
   770 void* dlmemalign(size_t, size_t);
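
/*
  Usage sketch (an editorial addition): requesting a page-aligned
  buffer; the alignment argument comes first and should be a power of
  two:

    void* buf = memalign(4096, 65536);  // 64K block aligned to 4096 bytes
*/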
   771 
   772 /*
   773   valloc(size_t n);
   774   Equivalent to memalign(pagesize, n), where pagesize is the page
   775   size of the system. If the pagesize is unknown, 4096 is used.
   776 */
   777 void* dlvalloc(size_t);
   778 
   779 /*
   780   mallopt(int parameter_number, int parameter_value)
   781   Sets tunable parameters. The format is to provide a
   782   (parameter-number, parameter-value) pair.  mallopt then sets the
   783   corresponding parameter to the argument value if it can (i.e., so
   784   long as the value is meaningful), and returns 1 if successful else
   785   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
   786   normally defined in malloc.h.  None of these are used in this malloc,
   787   so setting them has no effect. But this malloc also supports other
   788   options in mallopt. See below for details.  Briefly, supported
   789   parameters are as follows (listed defaults are for "typical"
   790   configurations).
   791 
   792   Symbol            param #  default    allowed param values
   793   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
   794   M_GRANULARITY        -2     page size   any power of 2 >= page size
   795   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
   796 */
   797 int dlmallopt(int, int);
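
/*
  Usage sketch (an editorial addition): raising the mmap threshold so
  that only requests of 1MB or more are serviced directly via mmap;
  mallopt returns 1 on success and 0 if the value is rejected:

    if (mallopt(M_MMAP_THRESHOLD, 1024 * 1024) == 0) {
      // value not accepted; the previous threshold stays in effect
    }
*/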
   798 
   799 /*
   800   malloc_footprint();
   801   Returns the number of bytes obtained from the system.  The total
   802   number of bytes allocated by malloc, realloc etc., is less than this
   803   value. Unlike mallinfo, this function returns only a precomputed
   804   result, so can be called frequently to monitor memory consumption.
   805   Even if locks are otherwise defined, this function does not use them,
   806   so results might not be up to date.
   807 */
   808 size_t dlmalloc_footprint(void);
   809 
   810 /*
   811   malloc_max_footprint();
   812   Returns the maximum number of bytes obtained from the system. This
   813   value will be greater than current footprint if deallocated space
   814   has been reclaimed by the system. The peak number of bytes allocated
   815   by malloc, realloc etc., is less than this value. Unlike mallinfo,
   816   this function returns only a precomputed result, so can be called
   817   frequently to monitor memory consumption.  Even if locks are
   818   otherwise defined, this function does not use them, so results might
   819   not be up to date.
   820 */
   821 size_t dlmalloc_max_footprint(void);
   822 
   823 #if !NO_MALLINFO
   824 /*
   825   mallinfo()
   826   Returns (by copy) a struct containing various summary statistics:
   827 
   828   arena:     current total non-mmapped bytes allocated from system
   829   ordblks:   the number of free chunks
   830   smblks:    always zero.
   831   hblks:     current number of mmapped regions
   832   hblkhd:    total bytes held in mmapped regions
   833   usmblks:   the maximum total allocated space. This will be greater
   834                 than current total if trimming has occurred.
   835   fsmblks:   always zero
   836   uordblks:  current total allocated space (normal or mmapped)
   837   fordblks:  total free space
   838   keepcost:  the maximum number of bytes that could ideally be released
   839                back to system via malloc_trim. ("ideally" means that
   840                it ignores page restrictions etc.)
   841 
   842   Because these fields are ints, but internal bookkeeping may
   843   be kept as longs, the reported values may wrap around zero and
   844   thus be inaccurate.
   845 */
   846 struct mallinfo dlmallinfo(void);
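
/*
  Usage sketch (an editorial addition): reading two of the summary
  fields described above; MALLINFO_FIELD_TYPE defaults to size_t here,
  so the values are cast before printing:

    struct mallinfo mi = mallinfo();
    printf("allocated: %lu bytes, free: %lu bytes\n",
           (unsigned long) mi.uordblks, (unsigned long) mi.fordblks);
*/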
   847 #endif /* NO_MALLINFO */
   848 
   849 /*
   850   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
   851 
   852   independent_calloc is similar to calloc, but instead of returning a
   853   single cleared space, it returns an array of pointers to n_elements
   854   independent elements that can hold contents of size elem_size, each
   855   of which starts out cleared, and can be independently freed,
   856   realloc'ed etc. The elements are guaranteed to be adjacently
   857   allocated (this is not guaranteed to occur with multiple callocs or
   858   mallocs), which may also improve cache locality in some
   859   applications.
   860 
   861   The "chunks" argument is optional (i.e., may be null, which is
   862   probably the most typical usage). If it is null, the returned array
   863   is itself dynamically allocated and should also be freed when it is
   864   no longer needed. Otherwise, the chunks array must be of at least
   865   n_elements in length. It is filled in with the pointers to the
   866   chunks.
   867 
   868   In either case, independent_calloc returns this pointer array, or
   869   null if the allocation failed.  If n_elements is zero and "chunks"
   870   is null, it returns a chunk representing an array with zero elements
   871   (which should be freed if not wanted).
   872 
   873   Each element must be individually freed when it is no longer
   874   needed. If you'd like to instead be able to free all at once, you
   875   should instead use regular calloc and assign pointers into this
   876   space to represent elements.  (In this case though, you cannot
   877   independently free elements.)
   878 
   879   independent_calloc simplifies and speeds up implementations of many
   880   kinds of pools.  It may also be useful when constructing large data
   881   structures that initially have a fixed number of fixed-sized nodes,
   882   but the number is not known at compile time, and some of the nodes
   883   may later need to be freed. For example:
   884 
   885   struct Node { int item; struct Node* next; };
   886 
   887   struct Node* build_list() {
   888     struct Node** pool;
   889     int i, n = read_number_of_nodes_needed();
   890     if (n <= 0) return 0;
   891     pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
   892     if (pool == 0) die();
   893     // organize into a linked list...
   894     struct Node* first = pool[0];
   895     for (i = 0; i < n-1; ++i)
   896       pool[i]->next = pool[i+1];
   897     free(pool);     // Can now free the array (or not, if it is needed later)
   898     return first;
   899   }
   900 */
   901 void** dlindependent_calloc(size_t, size_t, void**);
   902 
   903 /*
   904   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   905 
   906   independent_comalloc allocates, all at once, a set of n_elements
   907   chunks with sizes indicated in the "sizes" array.    It returns
   908   an array of pointers to these elements, each of which can be
   909   independently freed, realloc'ed etc. The elements are guaranteed to
   910   be adjacently allocated (this is not guaranteed to occur with
   911   multiple callocs or mallocs), which may also improve cache locality
   912   in some applications.
   913 
   914   The "chunks" argument is optional (i.e., may be null). If it is null
   915   the returned array is itself dynamically allocated and should also
   916   be freed when it is no longer needed. Otherwise, the chunks array
   917   must be of at least n_elements in length. It is filled in with the
   918   pointers to the chunks.
   919 
   920   In either case, independent_comalloc returns this pointer array, or
   921   null if the allocation failed.  If n_elements is zero and chunks is
   922   null, it returns a chunk representing an array with zero elements
   923   (which should be freed if not wanted).
   924 
   925   Each element must be individually freed when it is no longer
   926   needed. If you'd like to instead be able to free all at once, you
   927   should instead use a single regular malloc, and assign pointers at
   928   particular offsets in the aggregate space. (In this case though, you
   929   cannot independently free elements.)
   930 
   931   independent_comalloc differs from independent_calloc in that each
   932   element may have a different size, and also that it does not
   933   automatically clear elements.
   934 
   935   independent_comalloc can be used to speed up allocation in cases
   936   where several structs or objects must always be allocated at the
   937   same time.  For example:
   938 
   939   struct Head { ... }
   940   struct Foot { ... }
   941 
   942   void send_message(char* msg) {
   943     int msglen = strlen(msg);
   944     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
   945     void* chunks[3];
   946     if (independent_comalloc(3, sizes, chunks) == 0)
   947       die();
   948     struct Head* head = (struct Head*)(chunks[0]);
   949     char*        body = (char*)(chunks[1]);
   950     struct Foot* foot = (struct Foot*)(chunks[2]);
   951     // ...
   952   }
   953 
   954   In general though, independent_comalloc is worth using only for
   955   larger values of n_elements. For small values, you probably won't
   956   detect enough difference from series of malloc calls to bother.
   957 
   958   Overuse of independent_comalloc can increase overall memory usage,
   959   since it cannot reuse existing noncontiguous small chunks that
   960   might be available for some of the elements.
   961 */
   962 void** dlindependent_comalloc(size_t, size_t*, void**);
   963 
   964 
   965 /*
   966   pvalloc(size_t n);
   967   Equivalent to valloc(minimum-page-that-holds(n)), that is,
   968   round up n to nearest pagesize.
   969  */
   970 void*  dlpvalloc(size_t);
   971 
   972 /*
   973   malloc_trim(size_t pad);
   974 
   975   If possible, gives memory back to the system (via negative arguments
   976   to sbrk) if there is unused memory at the `high' end of the malloc
   977   pool or in unused MMAP segments. You can call this after freeing
   978   large blocks of memory to potentially reduce the system-level memory
   979   requirements of a program. However, it cannot guarantee to reduce
   980   memory. Under some allocation patterns, some large free blocks of
   981   memory will be locked between two used chunks, so they cannot be
   982   given back to the system.
   983 
   984   The `pad' argument to malloc_trim represents the amount of free
   985   trailing space to leave untrimmed. If this argument is zero, only
   986   the minimum amount of memory to maintain internal data structures
   987   will be left. Non-zero arguments can be supplied to maintain enough
   988   trailing space to service future expected allocations without having
   989   to re-obtain memory from the system.
   990 
   991   Malloc_trim returns 1 if it actually released any memory, else 0.
   992 */
   993 int  dlmalloc_trim(size_t);
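
/*
  Usage sketch (an editorial addition): after freeing a batch of large
  allocations, a long-lived program can ask the allocator to give
  unused top-of-heap memory back while keeping 64K of slack:

    free(big_buffer);                        // hypothetical large block
    if (malloc_trim((size_t)64 * 1024) == 0) {
      // nothing could be released this time
    }
*/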
   994 
   995 /*
   996   malloc_usable_size(void* p);
   997 
   998   Returns the number of bytes you can actually use in
   999   an allocated chunk, which may be more than you requested (although
  1000   often not) due to alignment and minimum size constraints.
  1001   You can use this many bytes without worrying about
  1002   overwriting other allocated objects. This is not a particularly great
  1003   programming practice. malloc_usable_size can be more useful in
  1004   debugging and assertions, for example:
  1005 
  1006   p = malloc(n);
  1007   assert(malloc_usable_size(p) >= 256);
  1008 */
  1009 size_t dlmalloc_usable_size(void*);
  1010 
  1011 /*
  1012   malloc_stats();
  1013   Prints on stderr the amount of space obtained from the system (both
  1014   via sbrk and mmap), the maximum amount (which may be more than
  1015   current if malloc_trim and/or munmap got called), and the current
  1016   number of bytes allocated via malloc (or realloc, etc) but not yet
  1017   freed. Note that this is the number of bytes allocated, not the
  1018   number requested. It will be larger than the number requested
  1019   because of alignment and bookkeeping overhead. Because it includes
  1020   alignment wastage as being in use, this figure may be greater than
  1021   zero even when no user-level chunks are allocated.
  1022 
  1023   The reported current and maximum system memory can be inaccurate if
  1024   a program makes other calls to system memory allocation functions
  1025   (normally sbrk) outside of malloc.
  1026 
  1027   malloc_stats prints only the most commonly interesting statistics.
  1028   More information can be obtained by calling mallinfo.
  1029 */
  1030 void  dlmalloc_stats(void);
  1031 
  1032 #endif /* ONLY_MSPACES */
  1033 
  1034 #if MSPACES
  1035 
  1036 /*
  1037   mspace is an opaque type representing an independent
  1038   region of space that supports mspace_malloc, etc.
  1039 */
  1040 typedef void* mspace;
  1041 
  1042 /*
  1043   create_mspace creates and returns a new independent space with the
  1044   given initial capacity, or, if 0, the default granularity size.  It
  1045   returns null if there is no system memory available to create the
  1046   space.  If argument locked is non-zero, the space uses a separate
  1047   lock to control access. The capacity of the space will grow
  1048   dynamically as needed to service mspace_malloc requests.  You can
  1049   control the sizes of incremental increases of this space by
  1050   compiling with a different DEFAULT_GRANULARITY or dynamically
  1051   setting with mallopt(M_GRANULARITY, value).
  1052 */
  1053 mspace create_mspace(size_t capacity, int locked);
  1054 
  1055 /*
  1056   destroy_mspace destroys the given space, and attempts to return all
  1057   of its memory back to the system, returning the total number of
  1058   bytes freed. After destruction, the results of access to all memory
  1059   used by the space become undefined.
  1060 */
  1061 size_t destroy_mspace(mspace msp);
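
/*
  Lifecycle sketch (an editorial addition): a temporary arena whose
  memory is handed back to the system all at once by destroying the
  space:

    mspace arena = create_mspace(0, 0);   // default capacity, no lock
    void* a = mspace_malloc(arena, 100);
    void* b = mspace_malloc(arena, 200);
    // ... use a and b ...
    destroy_mspace(arena);                // releases a, b, and the space
*/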
  1062 
  1063 /*
  1064   create_mspace_with_base uses the memory supplied as the initial base
  1065   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  1066   space is used for bookkeeping, so the capacity must be at least this
  1067   large. (Otherwise 0 is returned.) When this initial space is
  1068   exhausted, additional memory will be obtained from the system.
  1069   Destroying this space will deallocate all additionally allocated
  1070   space (if possible) but not the initial base.
  1071 */
  1072 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
  1073 
  1074 /*
  1075   mspace_malloc behaves as malloc, but operates within
  1076   the given space.
  1077 */
  1078 void* mspace_malloc(mspace msp, size_t bytes);
  1079 
  1080 /*
  1081   mspace_free behaves as free, but operates within
  1082   the given space.
  1083 
  1084   If compiled with FOOTERS==1, mspace_free is not actually needed.
  1085   free may be called instead of mspace_free because freed chunks from
  1086   any space are handled by their originating spaces.
  1087 */
  1088 void mspace_free(mspace msp, void* mem);
  1089 
  1090 /*
  1091   mspace_realloc behaves as realloc, but operates within
  1092   the given space.
  1093 
  1094   If compiled with FOOTERS==1, mspace_realloc is not actually
  1095   needed.  realloc may be called instead of mspace_realloc because
  1096   realloced chunks from any space are handled by their originating
  1097   spaces.
  1098 */
  1099 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
  1100 
  1101 /*
  1102   mspace_calloc behaves as calloc, but operates within
  1103   the given space.
  1104 */
  1105 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  1106 
  1107 /*
  1108   mspace_memalign behaves as memalign, but operates within
  1109   the given space.
  1110 */
  1111 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  1112 
  1113 /*
  1114   mspace_independent_calloc behaves as independent_calloc, but
  1115   operates within the given space.
  1116 */
  1117 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  1118                                  size_t elem_size, void* chunks[]);
  1119 
  1120 /*
  1121   mspace_independent_comalloc behaves as independent_comalloc, but
  1122   operates within the given space.
  1123 */
  1124 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  1125                                    size_t sizes[], void* chunks[]);
  1126 
  1127 /*
  1128   mspace_footprint() returns the number of bytes obtained from the
  1129   system for this space.
  1130 */
  1131 size_t mspace_footprint(mspace msp);
  1132 
  1133 /*
  1134   mspace_max_footprint() returns the peak number of bytes obtained from the
  1135   system for this space.
  1136 */
  1137 size_t mspace_max_footprint(mspace msp);
  1138 
  1139 
  1140 #if !NO_MALLINFO
  1141 /*
  1142   mspace_mallinfo behaves as mallinfo, but reports properties of
  1143   the given space.
  1144 */
  1145 struct mallinfo mspace_mallinfo(mspace msp);
  1146 #endif /* NO_MALLINFO */
  1147 
  1148 /*
  1149   mspace_malloc_stats behaves as malloc_stats, but reports
  1150   properties of the given space.
  1151 */
  1152 void mspace_malloc_stats(mspace msp);
  1153 
  1154 /*
  1155   mspace_trim behaves as malloc_trim, but
  1156   operates within the given space.
  1157 */
  1158 int mspace_trim(mspace msp, size_t pad);
  1159 
  1160 /*
  1161   An alias for mallopt.
  1162 */
  1163 int mspace_mallopt(int, int);
  1164 
  1165 #endif /* MSPACES */
  1166 
  1167 #ifdef __cplusplus
  1168 };  /* end of extern "C" */
  1169 #endif /* __cplusplus */
  1170 
  1171 /*
  1172   ========================================================================
  1173   To make a fully customizable malloc.h header file, cut everything
  1174   above this line, put into file malloc.h, edit to suit, and #include it
  1175   on the next line, as well as in programs that use this malloc.
  1176   ========================================================================
  1177 */
  1178 
  1179 /* #include "malloc.h" */
  1180 
  1181 /*------------------------------ internal #includes ---------------------- */
  1182 
  1183 #ifdef _MSC_VER
  1184 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
  1185 #endif /* _MSC_VER */
  1186 
  1187 #ifndef LACKS_STDIO_H
  1188 #include <stdio.h>       /* for printing in malloc_stats */
  1189 #endif
  1190 
  1191 #ifndef LACKS_ERRNO_H
  1192 #include <errno.h>       /* for MALLOC_FAILURE_ACTION */
  1193 #endif /* LACKS_ERRNO_H */
  1194 #if FOOTERS
  1195 #include <time.h>        /* for magic initialization */
  1196 #endif /* FOOTERS */
  1197 #ifndef LACKS_STDLIB_H
  1198 #include <stdlib.h>      /* for abort() */
  1199 #endif /* LACKS_STDLIB_H */
  1200 #ifdef DEBUG
  1201 #if ABORT_ON_ASSERT_FAILURE
  1202 #define assert(x) if(!(x)) ABORT
  1203 #else /* ABORT_ON_ASSERT_FAILURE */
  1204 #include <assert.h>
  1205 #endif /* ABORT_ON_ASSERT_FAILURE */
  1206 #else  /* DEBUG */
  1207 #define assert(x)
  1208 #endif /* DEBUG */
  1209 #ifndef LACKS_STRING_H
  1210 #include <string.h>      /* for memset etc */
  1211 #endif  /* LACKS_STRING_H */
  1212 #if USE_BUILTIN_FFS
  1213 #ifndef LACKS_STRINGS_H
  1214 #include <strings.h>     /* for ffs */
  1215 #endif /* LACKS_STRINGS_H */
  1216 #endif /* USE_BUILTIN_FFS */
  1217 #if HAVE_MMAP
  1218 #ifndef LACKS_SYS_MMAN_H
  1219 #include <sys/mman.h>    /* for mmap */
  1220 #endif /* LACKS_SYS_MMAN_H */
  1221 #ifndef LACKS_FCNTL_H
  1222 #include <fcntl.h>
  1223 #endif /* LACKS_FCNTL_H */
  1224 #endif /* HAVE_MMAP */
  1225 #if HAVE_MORECORE
  1226 #ifndef LACKS_UNISTD_H
  1227 #include <unistd.h>     /* for sbrk */
  1228 #else /* LACKS_UNISTD_H */
  1229 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  1230 extern void*     sbrk(ptrdiff_t);
  1231 #endif /* FreeBSD etc */
  1232 #endif /* LACKS_UNISTD_H */
  1233 #endif /* HAVE_MORECORE */
  1234 
  1235 #ifndef WIN32
  1236 #ifndef malloc_getpagesize
  1237 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
  1238 #    ifndef _SC_PAGE_SIZE
  1239 #      define _SC_PAGE_SIZE _SC_PAGESIZE
  1240 #    endif
  1241 #  endif
  1242 #  ifdef _SC_PAGE_SIZE
  1243 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  1244 #  else
  1245 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  1246        extern size_t getpagesize();
  1247 #      define malloc_getpagesize getpagesize()
  1248 #    else
  1249 #      ifdef WIN32 /* use supplied emulation of getpagesize */
  1250 #        define malloc_getpagesize getpagesize()
  1251 #      else
  1252 #        ifndef LACKS_SYS_PARAM_H
  1253 #          include <sys/param.h>
  1254 #        endif
  1255 #        ifdef EXEC_PAGESIZE
  1256 #          define malloc_getpagesize EXEC_PAGESIZE
  1257 #        else
  1258 #          ifdef NBPG
  1259 #            ifndef CLSIZE
  1260 #              define malloc_getpagesize NBPG
  1261 #            else
  1262 #              define malloc_getpagesize (NBPG * CLSIZE)
  1263 #            endif
  1264 #          else
  1265 #            ifdef NBPC
  1266 #              define malloc_getpagesize NBPC
  1267 #            else
  1268 #              ifdef PAGESIZE
  1269 #                define malloc_getpagesize PAGESIZE
  1270 #              else /* just guess */
  1271 #                define malloc_getpagesize ((size_t)4096U)
  1272 #              endif
  1273 #            endif
  1274 #          endif
  1275 #        endif
  1276 #      endif
  1277 #    endif
  1278 #  endif
  1279 #endif
  1280 #endif
  1281 
  1282 /* ------------------- size_t and alignment properties -------------------- */
  1283 
  1284 /* The byte and bit size of a size_t */
  1285 #define SIZE_T_SIZE         (sizeof(size_t))
  1286 #define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
  1287 
  1288 /* Some constants coerced to size_t */
  1289 /* Annoying but necessary to avoid errors on some platforms */
  1290 #define SIZE_T_ZERO         ((size_t)0)
  1291 #define SIZE_T_ONE          ((size_t)1)
  1292 #define SIZE_T_TWO          ((size_t)2)
  1293 #define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
  1294 #define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
  1295 #define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
  1296 #define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
  1297 
  1298 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
  1299 #define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
  1300 
  1301 /* True if address a has acceptable alignment */
  1302 #define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
  1303 
  1304 /* the number of bytes to offset an address to align it */
  1305 #define align_offset(A)\
  1306  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  1307   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
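       /*
         For illustration (assuming MALLOC_ALIGNMENT is 8, so CHUNK_ALIGN_MASK
         is 7):

             is_aligned(0x1000)     holds, since its low three bits are clear
             align_offset(0x1003)   is 5, because 0x1003 + 5 == 0x1008
             align_offset(0x1008)   is 0
       */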
  1308 
  1309 /* -------------------------- MMAP preliminaries ------------------------- */
  1310 
  1311 /*
  1312    If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
  1313    checks to fail so compiler optimizer can delete code rather than
  1314    using so many "#if"s.
  1315 */
  1316 
  1317 
  1318 /* MORECORE and MMAP must return MFAIL on failure */
  1319 #define MFAIL                ((void*)(MAX_SIZE_T))
  1320 #define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
  1321 
  1322 #if !HAVE_MMAP
  1323 #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
  1324 #define USE_MMAP_BIT         (SIZE_T_ZERO)
  1325 #define CALL_MMAP(s)         MFAIL
  1326 #define CALL_MUNMAP(a, s)    (-1)
  1327 #define DIRECT_MMAP(s)       MFAIL
  1328 
  1329 #else /* HAVE_MMAP */
  1330 #define IS_MMAPPED_BIT       (SIZE_T_ONE)
  1331 #define USE_MMAP_BIT         (SIZE_T_ONE)
  1332 
  1333 #ifndef WIN32
  1334 #define CALL_MUNMAP(a, s)    munmap((a), (s))
  1335 #define MMAP_PROT            (PROT_READ|PROT_WRITE)
  1336 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  1337 #define MAP_ANONYMOUS        MAP_ANON
  1338 #endif /* MAP_ANON */
  1339 #ifdef MAP_ANONYMOUS
  1340 #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
  1341 #define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
  1342 #else /* MAP_ANONYMOUS */
  1343 /*
  1344    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  1345    is unlikely to be needed, but is supplied just in case.
  1346 */
  1347 #define MMAP_FLAGS           (MAP_PRIVATE)
  1348 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  1349 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  1350            (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1351             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  1352             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
  1353 #endif /* MAP_ANONYMOUS */
  1354 
  1355 #define DIRECT_MMAP(s)       CALL_MMAP(s)
  1356 #else /* WIN32 */
  1357 
  1358 /* Win32 MMAP via VirtualAlloc */
  1359 static void* win32mmap(size_t size) {
  1360   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  1361   return (ptr != 0)? ptr: MFAIL;
  1362 }
  1363 
  1364 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
  1365 static void* win32direct_mmap(size_t size) {
  1366   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
  1367                            PAGE_READWRITE);
  1368   return (ptr != 0)? ptr: MFAIL;
  1369 }
  1370 
  1371 /* This function supports releasing coalesced segments */
  1372 static int win32munmap(void* ptr, size_t size) {
  1373   MEMORY_BASIC_INFORMATION minfo;
  1374   char* cptr = ptr;
  1375   while (size) {
  1376     if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
  1377       return -1;
  1378     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
  1379         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
  1380       return -1;
  1381     if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
  1382       return -1;
  1383     cptr += minfo.RegionSize;
  1384     size -= minfo.RegionSize;
  1385   }
  1386   return 0;
  1387 }
  1388 
  1389 #define CALL_MMAP(s)         win32mmap(s)
  1390 #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
  1391 #define DIRECT_MMAP(s)       win32direct_mmap(s)
  1392 #endif /* WIN32 */
  1393 #endif /* HAVE_MMAP */
  1394 
  1395 #if HAVE_MMAP && HAVE_MREMAP
  1396 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
  1397 #else  /* HAVE_MMAP && HAVE_MREMAP */
  1398 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
  1399 #endif /* HAVE_MMAP && HAVE_MREMAP */
  1400 
  1401 #if HAVE_MORECORE
  1402 #define CALL_MORECORE(S)     MORECORE(S)
  1403 #else  /* HAVE_MORECORE */
  1404 #define CALL_MORECORE(S)     MFAIL
  1405 #endif /* HAVE_MORECORE */
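       /*
         Illustrative only: the system-interface code below (sys_alloc and
         friends) tests the results of these macros against MFAIL/CMFAIL
         rather than NULL.  A hypothetical direct use, for some suitable
         length len:

             char* base = (char*)(CALL_MMAP(len));
             if (base != CMFAIL) {
               use the len bytes starting at base, then release them with
               if (CALL_MUNMAP(base, len) != 0)
                 ABORT;
             }
       */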
  1406 
  1407 /* mstate bit set if contiguous morecore disabled or failed */
  1408 #define USE_NONCONTIGUOUS_BIT (4U)
  1409 
  1410 /* segment bit set in create_mspace_with_base */
  1411 #define EXTERN_BIT            (8U)
  1412 
  1413 
  1414 /* --------------------------- Lock preliminaries ------------------------ */
  1415 
  1416 #if USE_LOCKS
  1417 
  1418 /*
  1419   When locks are defined, there are up to two global locks:
  1420 
  1421   * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
  1422     MORECORE.  In many cases sys_alloc requires two calls that should
  1423     not be interleaved with calls by other threads.  This does not
  1424     protect against direct calls to MORECORE by other threads not
  1425     using this lock, so there is still code to cope as best we can with
  1426     interference.
  1427 
  1428   * magic_init_mutex ensures that mparams.magic and other
  1429     unique mparams values are initialized only once.
  1430 */
  1431 
  1432 #ifndef WIN32
  1433 /* By default use posix locks */
  1434 #include <pthread.h>
  1435 #define MLOCK_T pthread_mutex_t
  1436 #define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
  1437 #define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
  1438 #define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
  1439 
  1440 #if HAVE_MORECORE
  1441 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
  1442 #endif /* HAVE_MORECORE */
  1443 
  1444 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
  1445 
  1446 #else /* WIN32 */
  1447 /*
  1448    Because lock-protected regions have bounded times, and there
  1449    are no recursive lock calls, we can use simple spinlocks.
  1450 */
  1451 
  1452 #define MLOCK_T long
  1453 static int win32_acquire_lock (MLOCK_T *sl) {
  1454   for (;;) {
  1455 #ifdef InterlockedCompareExchangePointer
  1456     if (!InterlockedCompareExchange(sl, 1, 0))
  1457       return 0;
  1458 #else  /* Use older void* version */
  1459     if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
  1460       return 0;
  1461 #endif /* InterlockedCompareExchangePointer */
  1462     Sleep (0);
  1463   }
  1464 }
  1465 
  1466 static void win32_release_lock (MLOCK_T *sl) {
  1467   InterlockedExchange (sl, 0);
  1468 }
  1469 
  1470 #define INITIAL_LOCK(l)      *(l)=0
  1471 #define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
  1472 #define RELEASE_LOCK(l)      win32_release_lock(l)
  1473 #if HAVE_MORECORE
  1474 static MLOCK_T morecore_mutex;
  1475 #endif /* HAVE_MORECORE */
  1476 static MLOCK_T magic_init_mutex;
  1477 #endif /* WIN32 */
  1478 
  1479 #define USE_LOCK_BIT               (2U)
  1480 #else  /* USE_LOCKS */
  1481 #define USE_LOCK_BIT               (0U)
  1482 #define INITIAL_LOCK(l)
  1483 #endif /* USE_LOCKS */
  1484 
  1485 #if USE_LOCKS && HAVE_MORECORE
  1486 #define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
  1487 #define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
  1488 #else /* USE_LOCKS && HAVE_MORECORE */
  1489 #define ACQUIRE_MORECORE_LOCK()
  1490 #define RELEASE_MORECORE_LOCK()
  1491 #endif /* USE_LOCKS && HAVE_MORECORE */
  1492 
  1493 #if USE_LOCKS
  1494 #define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
  1495 #define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
  1496 #else  /* USE_LOCKS */
  1497 #define ACQUIRE_MAGIC_INIT_LOCK()
  1498 #define RELEASE_MAGIC_INIT_LOCK()
  1499 #endif /* USE_LOCKS */
  1500 
  1501 
  1502 /* -----------------------  Chunk representations ------------------------ */
  1503 
  1504 /*
  1505   (The following includes lightly edited explanations by Colin Plumb.)
  1506 
  1507   The malloc_chunk declaration below is misleading (but accurate and
  1508   necessary).  It declares a "view" into memory allowing access to
  1509   necessary fields at known offsets from a given base.
  1510 
  1511   Chunks of memory are maintained using a `boundary tag' method as
  1512   originally described by Knuth.  (See the paper by Paul Wilson
  1513   ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
  1514   techniques.)  Sizes of free chunks are stored both in the front of
  1515   each chunk and at the end.  This makes consolidating fragmented
  1516   chunks into bigger chunks fast.  The head fields also hold bits
  1517   representing whether chunks are free or in use.
  1518 
  1519   Here are some pictures to make it clearer.  They are "exploded" to
  1520   show that the state of a chunk can be thought of as extending from
  1521   the high 31 bits of the head field of its header through the
  1522   prev_foot and PINUSE_BIT bit of the following chunk header.
  1523 
  1524   A chunk that's in use looks like:
  1525 
  1526    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1527            | Size of previous chunk (if P = 1)                             |
  1528            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1529          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1530          | Size of this chunk                                         1| +-+
  1531    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1532          |                                                               |
  1533          +-                                                             -+
  1534          |                                                               |
  1535          +-                                                             -+
  1536          |                                                               :
  1537          +-      size - sizeof(size_t) available payload bytes          -+
  1538          :                                                               |
  1539  chunk-> +-                                                             -+
  1540          |                                                               |
  1541          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1542        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
  1543        | Size of next chunk (may or may not be in use)               | +-+
  1544  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1545 
  1546     And if it's free, it looks like this:
  1547 
  1548    chunk-> +-                                                             -+
  1549            | User payload (must be in use, or we would have merged!)       |
  1550            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1551          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1552          | Size of this chunk                                         0| +-+
  1553    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1554          | Next pointer                                                  |
  1555          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1556          | Prev pointer                                                  |
  1557          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1558          |                                                               :
  1559          +-      size - sizeof(struct chunk) unused bytes               -+
  1560          :                                                               |
  1561  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1562          | Size of this chunk                                            |
  1563          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1564        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
  1565        | Size of next chunk (must be in use, or we would have merged)| +-+
  1566  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1567        |                                                               :
  1568        +- User payload                                                -+
  1569        :                                                               |
  1570        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1571                                                                      |0|
  1572                                                                      +-+
  1573   Note that since we always merge adjacent free chunks, the chunks
  1574   adjacent to a free chunk must be in use.
  1575 
  1576   Given a pointer to a chunk (which can be derived trivially from the
  1577   payload pointer) we can, in O(1) time, find out whether the adjacent
  1578   chunks are free, and if so, unlink them from the lists that they
  1579   are on and merge them with the current chunk.
  1580 
  1581   Chunks always begin on even word boundaries, so the mem portion
  1582   (which is returned to the user) is also on an even word boundary, and
  1583   thus at least double-word aligned.
  1584 
  1585   The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
  1586   chunk size (which is always a multiple of two words), is an in-use
  1587   bit for the *previous* chunk.  If that bit is *clear*, then the
  1588   word before the current chunk size contains the previous chunk
  1589   size, and can be used to find the front of the previous chunk.
  1590   The very first chunk allocated always has this bit set, preventing
  1591   access to non-existent (or non-owned) memory. If pinuse is set for
  1592   any given chunk, then you CANNOT determine the size of the
  1593   previous chunk, and might even get a memory addressing fault when
  1594   trying to do so.
  1595 
  1596   The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
  1597   the chunk size redundantly records whether the current chunk is
  1598   inuse. This redundancy enables usage checks within free and realloc,
  1599   and reduces indirection when freeing and consolidating chunks.
  1600 
  1601   Each freshly allocated chunk must have both cinuse and pinuse set.
  1602   That is, each allocated chunk borders either a previously allocated
  1603   and still in-use chunk, or the base of its memory arena. This is
  1604   ensured by making all allocations from the `lowest' part of any
  1605   found chunk.  Further, no free chunk physically borders another one,
  1606   so each free chunk is known to be preceded and followed by either
  1607   inuse chunks or the ends of memory.
  1608 
  1609   Note that the `foot' of the current chunk is actually represented
  1610   as the prev_foot of the NEXT chunk. This makes it easier to
  1611   deal with alignments etc but can be very confusing when trying
  1612   to extend or adapt this code.
  1613 
  1614   The exceptions to all this are
  1615 
  1616      1. The special chunk `top' is the top-most available chunk (i.e.,
  1617         the one bordering the end of available memory). It is treated
  1618         specially.  Top is never included in any bin, is used only if
  1619         no other chunk is available, and is released back to the
  1620         system if it is very large (see M_TRIM_THRESHOLD).  In effect,
  1621         the top chunk is treated as larger (and thus less well
  1622         fitting) than any other available chunk.  The top chunk
  1623         doesn't update its trailing size field since there is no next
  1624         contiguous chunk that would have to index off it. However,
  1625         space is still allocated for it (TOP_FOOT_SIZE) to enable
  1626         separation or merging when space is extended.
  1627 
  1628      2. Chunks allocated via mmap, which have the lowest-order bit
  1629         (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
  1630         PINUSE_BIT in their head fields.  Because they are allocated
  1631         one-by-one, each must carry its own prev_foot field, which is
  1632         also used to hold the offset this chunk has within its mmapped
  1633         region, which is needed to preserve alignment. Each mmapped
  1634         chunk is trailed by the first two fields of a fake next-chunk
  1635         for sake of usage checks.
  1636 
  1637 */
  1638 
  1639 struct malloc_chunk {
  1640   size_t               prev_foot;  /* Size of previous chunk (if free).  */
  1641   size_t               head;       /* Size and inuse bits. */
  1642   struct malloc_chunk* fd;         /* double links -- used only if free. */
  1643   struct malloc_chunk* bk;
  1644 };
  1645 
  1646 typedef struct malloc_chunk  mchunk;
  1647 typedef struct malloc_chunk* mchunkptr;
  1648 typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
  1649 typedef unsigned int bindex_t;         /* Described below */
  1650 typedef unsigned int binmap_t;         /* Described below */
  1651 typedef unsigned int flag_t;           /* The type of various bit flag sets */
  1652 
  1653 /* ------------------- Chunks sizes and alignments ----------------------- */
  1654 
  1655 #define MCHUNK_SIZE         (sizeof(mchunk))
  1656 
  1657 #if FOOTERS
  1658 #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
  1659 #else /* FOOTERS */
  1660 #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
  1661 #endif /* FOOTERS */
  1662 
  1663 /* MMapped chunks need a second word of overhead ... */
  1664 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
  1665 /* ... and additional padding for fake next-chunk at foot */
  1666 #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
  1667 
  1668 /* The smallest size we can malloc is an aligned minimal chunk */
  1669 #define MIN_CHUNK_SIZE\
  1670   ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1671 
  1672 /* conversion from malloc headers to user pointers, and back */
  1673 #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
  1674 #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
  1675 /* chunk associated with aligned address A */
  1676 #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
  1677 
  1678 /* Bounds on request (not chunk) sizes. */
  1679 #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
  1680 #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
  1681 
  1682 /* pad request bytes into a usable size */
  1683 #define pad_request(req) \
  1684    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1685 
  1686 /* pad request, checking for minimum (but not maximum) */
  1687 #define request2size(req) \
  1688   (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
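       /*
         A worked example (illustrative; assumes a 32-bit build with FOOTERS
         disabled, so SIZE_T_SIZE == 4, CHUNK_OVERHEAD == 4, MALLOC_ALIGNMENT
         == 8, MIN_CHUNK_SIZE == 16 and MIN_REQUEST == 11):

             request2size(5)   == 16    (below MIN_REQUEST, so MIN_CHUNK_SIZE)
             request2size(13)  == 24    ((13 + 4 + 7) & ~7)
             request2size(24)  == 32    ((24 + 4 + 7) & ~7)
       */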
  1689 
  1690 
  1691 /* ------------------ Operations on head and foot fields ----------------- */
  1692 
  1693 /*
  1694   The head field of a chunk is or'ed with PINUSE_BIT when the previous
  1695   adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
  1696   use. If the chunk was obtained with mmap, the prev_foot field has
  1697   IS_MMAPPED_BIT set, and its remaining bits hold the offset of the
  1698   base of the chunk from the base of its mmapped region.
  1699 */
  1700 
  1701 #define PINUSE_BIT          (SIZE_T_ONE)
  1702 #define CINUSE_BIT          (SIZE_T_TWO)
  1703 #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
  1704 
  1705 /* Head value for fenceposts */
  1706 #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
  1707 
  1708 /* extraction of fields from head words */
  1709 #define cinuse(p)           ((p)->head & CINUSE_BIT)
  1710 #define pinuse(p)           ((p)->head & PINUSE_BIT)
  1711 #define chunksize(p)        ((p)->head & ~(INUSE_BITS))
  1712 
  1713 #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
  1714 #define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
  1715 
  1716 /* Treat space at ptr +/- offset as a chunk */
  1717 #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
  1718 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
  1719 
  1720 /* Ptr to next or previous physical malloc_chunk. */
  1721 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
  1722 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
  1723 
  1724 /* extract next chunk's pinuse bit */
  1725 #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
  1726 
  1727 /* Get/set size at footer */
  1728 #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
  1729 #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
  1730 
  1731 /* Set size, pinuse bit, and foot */
  1732 #define set_size_and_pinuse_of_free_chunk(p, s)\
  1733   ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
  1734 
  1735 /* Set size, pinuse bit, foot, and clear next pinuse */
  1736 #define set_free_with_pinuse(p, s, n)\
  1737   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
  1738 
  1739 #define is_mmapped(p)\
  1740   (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
  1741 
  1742 /* Get the internal overhead associated with chunk p */
  1743 #define overhead_for(p)\
  1744  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
  1745 
  1746 /* Return true if malloced space is not necessarily cleared */
  1747 #if MMAP_CLEARS
  1748 #define calloc_must_clear(p) (!is_mmapped(p))
  1749 #else /* MMAP_CLEARS */
  1750 #define calloc_must_clear(p) (1)
  1751 #endif /* MMAP_CLEARS */
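       /*
         An illustrative sketch (not used by this code) of how the macros
         above combine, starting from a payload pointer mem handed out by
         malloc:

             mchunkptr p = mem2chunk(mem);
             size_t    s = chunksize(p);      size with INUSE_BITS masked off
             mchunkptr n = next_chunk(p);     same as chunk_plus_offset(p, s)
             if (!pinuse(p))
               p = prev_chunk(p);             valid only when pinuse is clear
       */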
  1752 
  1753 /* ---------------------- Overlaid data structures ----------------------- */
  1754 
  1755 /*
  1756   When chunks are not in use, they are treated as nodes of either
  1757   lists or trees.
  1758 
  1759   "Small"  chunks are stored in circular doubly-linked lists, and look
  1760   like this:
  1761 
  1762     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1763             |             Size of previous chunk                            |
  1764             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1765     `head:' |             Size of chunk, in bytes                         |P|
  1766       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1767             |             Forward pointer to next chunk in list             |
  1768             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1769             |             Back pointer to previous chunk in list            |
  1770             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1771             |             Unused space (may be 0 bytes long)                .
  1772             .                                                               .
  1773             .                                                               |
  1774 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1775     `foot:' |             Size of chunk, in bytes                           |
  1776             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1777 
  1778   Larger chunks are kept in a form of bitwise digital trees (aka
  1779   tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
  1780   free chunks greater than 256 bytes, their size doesn't impose any
  1781   constraints on user chunk sizes.  Each node looks like:
  1782 
  1783     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1784             |             Size of previous chunk                            |
  1785             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1786     `head:' |             Size of chunk, in bytes                         |P|
  1787       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1788             |             Forward pointer to next chunk of same size        |
  1789             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1790             |             Back pointer to previous chunk of same size       |
  1791             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1792             |             Pointer to left child (child[0])                  |
  1793             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1794             |             Pointer to right child (child[1])                 |
  1795             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1796             |             Pointer to parent                                 |
  1797             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1798             |             bin index of this chunk                           |
  1799             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1800             |             Unused space                                      .
  1801             .                                                               |
  1802 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1803     `foot:' |             Size of chunk, in bytes                           |
  1804             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1805 
  1806   Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
  1807   of the same size are arranged in a circularly-linked list, with only
  1808   the oldest chunk (the next to be used, in our FIFO ordering)
  1809   actually in the tree.  (Tree members are distinguished by a non-null
  1810   parent pointer.)  If a chunk with the same size as an existing node
  1811   is inserted, it is linked off the existing node using pointers that
  1812   work in the same way as fd/bk pointers of small chunks.
  1813 
  1814   Each tree contains a power of 2 sized range of chunk sizes (the
  1815   smallest is 0x100 <= x < 0x180), which is divided in half at each
  1816   tree level, with the chunks in the smaller half of the range (0x100
  1817   <= x < 0x140 for the top node) in the left subtree and the larger
  1818   half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
  1819   done by inspecting individual bits.
  1820 
  1821   Using these rules, each node's left subtree contains all smaller
  1822   sizes than its right subtree.  However, the node at the root of each
  1823   subtree has no particular ordering relationship to either.  (The
  1824   dividing line between the subtree sizes is based on trie relation.)
  1825   If we remove the last chunk of a given size from the interior of the
  1826   tree, we need to replace it with a leaf node.  The tree ordering
  1827   rules permit a node to be replaced by any leaf below it.
  1828 
  1829   The smallest chunk in a tree (a common operation in a best-fit
  1830   allocator) can be found by walking a path to the leftmost leaf in
  1831   the tree.  Unlike a usual binary tree, where we follow left child
  1832   pointers until we reach a null, here we follow the right child
  1833   pointer any time the left one is null, until we reach a leaf with
  1834   both child pointers null. The smallest chunk in the tree will be
  1835   somewhere along that path.
  1836 
  1837   The worst case number of steps to add, find, or remove a node is
  1838   bounded by the number of bits differentiating chunks within
  1839   bins. Under current bin calculations, this ranges from 6 up to 21
  1840   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  1841   is of course much better.
  1842 */
  1843 
  1844 struct malloc_tree_chunk {
  1845   /* The first four fields must be compatible with malloc_chunk */
  1846   size_t                    prev_foot;
  1847   size_t                    head;
  1848   struct malloc_tree_chunk* fd;
  1849   struct malloc_tree_chunk* bk;
  1850 
  1851   struct malloc_tree_chunk* child[2];
  1852   struct malloc_tree_chunk* parent;
  1853   bindex_t                  index;
  1854 };
  1855 
  1856 typedef struct malloc_tree_chunk  tchunk;
  1857 typedef struct malloc_tree_chunk* tchunkptr;
  1858 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
  1859 
  1860 /* A little helper macro for trees */
  1861 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
  1862 
  1863 /* ----------------------------- Segments -------------------------------- */
  1864 
  1865 /*
  1866   Each malloc space may include non-contiguous segments, held in a
  1867   list headed by an embedded malloc_segment record representing the
  1868   top-most space. Segments also include flags holding properties of
  1869   the space. Large chunks that are directly allocated by mmap are not
  1870   included in this list. They are instead independently created and
  1871   destroyed without otherwise keeping track of them.
  1872 
  1873   Segment management mainly comes into play for spaces allocated by
  1874   MMAP.  Any call to MMAP might or might not return memory that is
  1875   adjacent to an existing segment.  MORECORE normally contiguously
  1876   extends the current space, so this space is almost always adjacent,
  1877   which is simpler and faster to deal with. (This is why MORECORE is
  1878   used preferentially to MMAP when both are available -- see
  1879   sys_alloc.)  When allocating using MMAP, we don't use any of the
  1880   hinting mechanisms (inconsistently) supported in various
  1881   implementations of unix mmap, or distinguish reserving from
  1882   committing memory. Instead, we just ask for space, and exploit
  1883   contiguity when we get it.  It is probably possible to do
  1884   better than this on some systems, but no general scheme seems
  1885   to be significantly better.
  1886 
  1887   Management entails a simpler variant of the consolidation scheme
  1888   used for chunks to reduce fragmentation -- new adjacent memory is
  1889   normally prepended or appended to an existing segment. However,
  1890   there are limitations compared to chunk consolidation that mostly
  1891   reflect the fact that segment processing is relatively infrequent
  1892   (occurring only when getting memory from system) and that we
  1893   don't expect to have huge numbers of segments:
  1894 
  1895   * Segments are not indexed, so traversal requires linear scans.  (It
  1896     would be possible to index these, but is not worth the extra
  1897     overhead and complexity for most programs on most platforms.)
  1898   * New segments are only appended to old ones when holding top-most
  1899     memory; if they cannot be prepended to others, they are held in
  1900     different segments.
  1901 
  1902   Except for the top-most segment of an mstate, each segment record
  1903   is kept at the tail of its segment. Segments are added by pushing
  1904   segment records onto the list headed by &mstate.seg for the
  1905   containing mstate.
  1906 
  1907   Segment flags control allocation/merge/deallocation policies:
  1908   * If EXTERN_BIT set, then we did not allocate this segment,
  1909     and so should not try to deallocate or merge with others.
  1910     (This currently holds only for the initial segment passed
  1911     into create_mspace_with_base.)
  1912   * If IS_MMAPPED_BIT set, the segment may be merged with
  1913     other surrounding mmapped segments and trimmed/de-allocated
  1914     using munmap.
  1915   * If neither bit is set, then the segment was obtained using
  1916     MORECORE so can be merged with surrounding MORECORE'd segments
  1917     and deallocated/trimmed using MORECORE with negative arguments.
  1918 */
  1919 
  1920 struct malloc_segment {
  1921   char*        base;             /* base address */
  1922   size_t       size;             /* allocated size */
  1923   struct malloc_segment* next;   /* ptr to next segment */
  1924   flag_t       sflags;           /* mmap and extern flag */
  1925 };
  1926 
  1927 #define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
  1928 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
  1929 
  1930 typedef struct malloc_segment  msegment;
  1931 typedef struct malloc_segment* msegmentptr;
  1932 
  1933 /* ---------------------------- malloc_state ----------------------------- */
  1934 
  1935 /*
  1936    A malloc_state holds all of the bookkeeping for a space.
  1937    The main fields are:
  1938 
  1939   Top
  1940     The topmost chunk of the currently active segment. Its size is
  1941     cached in topsize.  The actual size of topmost space is
  1942     topsize+TOP_FOOT_SIZE, which includes space reserved for adding
  1943     fenceposts and segment records if necessary when getting more
  1944     space from the system.  The size at which to autotrim top is
  1945     cached from mparams in trim_check, except that it is disabled if
  1946     an autotrim fails.
  1947 
  1948   Designated victim (dv)
  1949     This is the preferred chunk for servicing small requests that
  1950     don't have exact fits.  It is normally the chunk split off most
  1951     recently to service another small request.  Its size is cached in
  1952     dvsize. The link fields of this chunk are not maintained since it
  1953     is not kept in a bin.
  1954 
  1955   SmallBins
  1956     An array of bin headers for free chunks.  These bins hold chunks
  1957     with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
  1958     chunks of all the same size, spaced 8 bytes apart.  To simplify
  1959     use in double-linked lists, each bin header acts as a malloc_chunk
  1960     pointing to the real first node, if it exists (else pointing to
  1961     itself).  This avoids special-casing for headers.  But to avoid
  1962     waste, we allocate only the fd/bk pointers of bins, and then use
  1963     repositioning tricks to treat these as the fields of a chunk.
  1964 
  1965   TreeBins
  1966     Treebins are pointers to the roots of trees holding a range of
  1967     sizes. There are 2 equally spaced treebins for each power of two
  1968     from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
  1969     larger.
  1970 
  1971   Bin maps
  1972     There is one bit map for small bins ("smallmap") and one for
  1973     treebins ("treemap").  Each bin sets its bit when non-empty, and
  1974     clears the bit when empty.  Bit operations are then used to avoid
  1975     bin-by-bin searching -- nearly all "search" is done without ever
  1976     looking at bins that won't be selected.  The bit maps
  1977     conservatively use 32 bits per map word, even on a 64-bit system.
  1978     For a good description of some of the bit-based techniques used
  1979     here, see Henry S. Warren Jr's book "Hacker's Delight" (and
  1980     supplement at http://hackersdelight.org/). Many of these are
  1981     intended to reduce the branchiness of paths through malloc etc, as
  1982     well as to reduce the number of memory locations read or written.
  1983 
  1984   Segments
  1985     A list of segments headed by an embedded malloc_segment record
  1986     representing the initial space.
  1987 
  1988   Address check support
  1989     The least_addr field is the least address ever obtained from
  1990     MORECORE or MMAP. Attempted frees and reallocs of any address less
  1991     than this are trapped (unless INSECURE is defined).
  1992 
  1993   Magic tag
  1994     A cross-check field that should always hold the same value as mparams.magic.
  1995 
  1996   Flags
  1997     Bits recording whether to use MMAP, locks, or contiguous MORECORE
  1998 
  1999   Statistics
  2000     Each space keeps track of current and maximum system memory
  2001     obtained via MORECORE or MMAP.
  2002 
  2003   Locking
  2004     If USE_LOCKS is defined, the "mutex" lock is acquired and released
  2005     around every public call using this mspace.
  2006 */
  2007 
  2008 /* Bin types, widths and sizes */
  2009 #define NSMALLBINS        (32U)
  2010 #define NTREEBINS         (32U)
  2011 #define SMALLBIN_SHIFT    (3U)
  2012 #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
  2013 #define TREEBIN_SHIFT     (8U)
  2014 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
  2015 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
  2016 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
  2017 
  2018 struct malloc_state {
  2019   binmap_t   smallmap;
  2020   binmap_t   treemap;
  2021   size_t     dvsize;
  2022   size_t     topsize;
  2023   char*      least_addr;
  2024   mchunkptr  dv;
  2025   mchunkptr  top;
  2026   size_t     trim_check;
  2027   size_t     magic;
  2028   mchunkptr  smallbins[(NSMALLBINS+1)*2];
  2029   tbinptr    treebins[NTREEBINS];
  2030   size_t     footprint;
  2031   size_t     max_footprint;
  2032   flag_t     mflags;
  2033 #if USE_LOCKS
  2034   MLOCK_T    mutex;     /* locate lock among fields that rarely change */
  2035 #endif /* USE_LOCKS */
  2036   msegment   seg;
  2037 };
  2038 
  2039 typedef struct malloc_state*    mstate;
  2040 
  2041 /* ------------- Global malloc_state and malloc_params ------------------- */
  2042 
  2043 /*
  2044   malloc_params holds global properties, including those that can be
  2045   dynamically set using mallopt. There is a single instance, mparams,
  2046   initialized in init_mparams.
  2047 */
  2048 
  2049 struct malloc_params {
  2050   size_t magic;
  2051   size_t page_size;
  2052   size_t granularity;
  2053   size_t mmap_threshold;
  2054   size_t trim_threshold;
  2055   flag_t default_mflags;
  2056 };
  2057 
  2058 static struct malloc_params mparams;
  2059 
  2060 /* The global malloc_state used for all non-"mspace" calls */
  2061 static struct malloc_state _gm_;
  2062 #define gm                 (&_gm_)
  2063 #define is_global(M)       ((M) == &_gm_)
  2064 #define is_initialized(M)  ((M)->top != 0)
  2065 
  2066 /* -------------------------- system alloc setup ------------------------- */
  2067 
  2068 /* Operations on mflags */
  2069 
  2070 #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
  2071 #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
  2072 #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
  2073 
  2074 #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
  2075 #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
  2076 #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
  2077 
  2078 #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
  2079 #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
  2080 
  2081 #define set_lock(M,L)\
  2082  ((M)->mflags = (L)?\
  2083   ((M)->mflags | USE_LOCK_BIT) :\
  2084   ((M)->mflags & ~USE_LOCK_BIT))
  2085 
  2086 /* page-align a size */
  2087 #define page_align(S)\
  2088  (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
  2089 
  2090 /* granularity-align a size */
  2091 #define granularity_align(S)\
  2092   (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
  2093 
  2094 #define is_page_aligned(S)\
  2095    (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
  2096 #define is_granularity_aligned(S)\
  2097    (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
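       /*
         A worked example (illustrative, with page_size == 4096):
         page_align(5000) == (5000 + 4096) & ~4095 == 8192, which satisfies
         is_page_aligned.  A full page is added before masking, so an
         already-aligned size is still rounded up: page_align(4096) == 8192.
       */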
  2098 
  2099 /*  True if segment S holds address A */
  2100 #define segment_holds(S, A)\
  2101   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
  2102 
  2103 /* Return segment holding given address */
  2104 static msegmentptr segment_holding(mstate m, char* addr) {
  2105   msegmentptr sp = &m->seg;
  2106   for (;;) {
  2107     if (addr >= sp->base && addr < sp->base + sp->size)
  2108       return sp;
  2109     if ((sp = sp->next) == 0)
  2110       return 0;
  2111   }
  2112 }
  2113 
  2114 /* Return true if segment contains a segment link */
  2115 static int has_segment_link(mstate m, msegmentptr ss) {
  2116   msegmentptr sp = &m->seg;
  2117   for (;;) {
  2118     if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
  2119       return 1;
  2120     if ((sp = sp->next) == 0)
  2121       return 0;
  2122   }
  2123 }
  2124 
  2125 #ifndef MORECORE_CANNOT_TRIM
  2126 #define should_trim(M,s)  ((s) > (M)->trim_check)
  2127 #else  /* MORECORE_CANNOT_TRIM */
  2128 #define should_trim(M,s)  (0)
  2129 #endif /* MORECORE_CANNOT_TRIM */
  2130 
  2131 /*
  2132   TOP_FOOT_SIZE is padding at the end of a segment, including space
  2133   that may be needed to place segment records and fenceposts when new
  2134   noncontiguous segments are added.
  2135 */
  2136 #define TOP_FOOT_SIZE\
  2137   (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
  2138 
  2139 
  2140 /* -------------------------------  Hooks -------------------------------- */
  2141 
  2142 /*
  2143   PREACTION should be defined to return 0 on success, and nonzero on
  2144   failure. If you are not using locking, you can redefine these to do
  2145   anything you like.
  2146 */
  2147 
  2148 #if USE_LOCKS
  2149 
  2150 /* Ensure locks are initialized */
  2151 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
  2152 
  2153 #define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
  2154 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
  2155 #else /* USE_LOCKS */
  2156 
  2157 #ifndef PREACTION
  2158 #define PREACTION(M) (0)
  2159 #endif  /* PREACTION */
  2160 
  2161 #ifndef POSTACTION
  2162 #define POSTACTION(M)
  2163 #endif  /* POSTACTION */
  2164 
  2165 #endif /* USE_LOCKS */
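       /*
         The public routines later in this file use these hooks in the
         pattern sketched here: PREACTION performs one-time initialization
         and acquires the lock when locking is enabled, and each successful
         PREACTION is balanced by POSTACTION on every exit path.

             if (!PREACTION(m)) {
               operate on mstate m
               POSTACTION(m);
             }
       */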
  2166 
  2167 /*
  2168   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  2169   USAGE_ERROR_ACTION is triggered on detected bad frees and
  2170   reallocs. The argument p is an address that might have triggered the
  2171   fault. It is ignored by the two predefined actions, but might be
  2172   useful in custom actions that try to help diagnose errors.
  2173 */
  2174 
  2175 #if PROCEED_ON_ERROR
  2176 
  2177 /* A count of the number of corruption errors causing resets */
  2178 int malloc_corruption_error_count;
  2179 
  2180 /* default corruption action */
  2181 static void reset_on_error(mstate m);
  2182 
  2183 #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
  2184 #define USAGE_ERROR_ACTION(m, p)
  2185 
  2186 #else /* PROCEED_ON_ERROR */
  2187 
  2188 #ifndef CORRUPTION_ERROR_ACTION
  2189 #define CORRUPTION_ERROR_ACTION(m) ABORT
  2190 #endif /* CORRUPTION_ERROR_ACTION */
  2191 
  2192 #ifndef USAGE_ERROR_ACTION
  2193 #define USAGE_ERROR_ACTION(m,p) ABORT
  2194 #endif /* USAGE_ERROR_ACTION */
  2195 
  2196 #endif /* PROCEED_ON_ERROR */
  2197 
  2198 /* -------------------------- Debugging setup ---------------------------- */
  2199 
  2200 #if ! DEBUG
  2201 
  2202 #define check_free_chunk(M,P)
  2203 #define check_inuse_chunk(M,P)
  2204 #define check_malloced_chunk(M,P,N)
  2205 #define check_mmapped_chunk(M,P)
  2206 #define check_malloc_state(M)
  2207 #define check_top_chunk(M,P)
  2208 
  2209 #else /* DEBUG */
  2210 #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
  2211 #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
  2212 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
  2213 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
  2214 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
  2215 #define check_malloc_state(M)       do_check_malloc_state(M)
  2216 
  2217 static void   do_check_any_chunk(mstate m, mchunkptr p);
  2218 static void   do_check_top_chunk(mstate m, mchunkptr p);
  2219 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
  2220 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
  2221 static void   do_check_free_chunk(mstate m, mchunkptr p);
  2222 static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
  2223 static void   do_check_tree(mstate m, tchunkptr t);
  2224 static void   do_check_treebin(mstate m, bindex_t i);
  2225 static void   do_check_smallbin(mstate m, bindex_t i);
  2226 static void   do_check_malloc_state(mstate m);
  2227 static int    bin_find(mstate m, mchunkptr x);
  2228 static size_t traverse_and_check(mstate m);
  2229 #endif /* DEBUG */
  2230 
  2231 /* ---------------------------- Indexing Bins ---------------------------- */
  2232 
  2233 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
  2234 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
  2235 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
  2236 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
  2237 
  2238 /* addressing by index. See above about smallbin repositioning */
  2239 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
  2240 #define treebin_at(M,i)     (&((M)->treebins[i]))
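       /*
         A worked example (illustrative, SMALLBIN_SHIFT == 3): for a free
         chunk of 40 bytes,

             is_small(40)           holds, since (40 >> 3) == 5 < NSMALLBINS
             small_index(40)        == 5
             small_index2size(5)    == 40
             smallbin_at(M, 5)      heads the list that holds such chunks
       */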
  2241 
  2242 /* assign tree index for size S to variable I */
  2243 #if defined(__GNUC__) && defined(i386)
  2244 #define compute_tree_index(S, I)\
  2245 {\
  2246   size_t X = S >> TREEBIN_SHIFT;\
  2247   if (X == 0)\
  2248     I = 0;\
  2249   else if (X > 0xFFFF)\
  2250     I = NTREEBINS-1;\
  2251   else {\
  2252     unsigned int K;\
  2253     __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
  2254     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  2255   }\
  2256 }
  2257 #else /* GNUC */
  2258 #define compute_tree_index(S, I)\
  2259 {\
  2260   size_t X = S >> TREEBIN_SHIFT;\
  2261   if (X == 0)\
  2262     I = 0;\
  2263   else if (X > 0xFFFF)\
  2264     I = NTREEBINS-1;\
  2265   else {\
  2266     unsigned int Y = (unsigned int)X;\
  2267     unsigned int N = ((Y - 0x100) >> 16) & 8;\
  2268     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
  2269     N += K;\
  2270     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
  2271     K = 14 - N + ((Y <<= K) >> 15);\
  2272     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  2273   }\
  2274 }
  2275 #endif /* GNUC */
  2276 
  2277 /* Bit representing maximum resolved size in a treebin at i */
  2278 #define bit_for_tree_index(i) \
  2279    (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
  2280 
  2281 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
  2282 #define leftshift_for_tree_index(i) \
  2283    ((i == NTREEBINS-1)? 0 : \
  2284     ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
  2285 
  2286 /* The size of the smallest chunk held in bin with index i */
  2287 #define minsize_for_tree_index(i) \
  2288    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
  2289    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
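       /*
         A worked example (illustrative, TREEBIN_SHIFT == 8): for a chunk
         size S == 768 (0x300), X == S >> 8 == 3, whose highest set bit is
         bit 1, so compute_tree_index yields
         I == (1 << 1) + ((768 >> 8) & 1) == 3.  Consistently,
         minsize_for_tree_index(3) == (1 << 9) | (1 << 8) == 768, and
         treebin 3 covers sizes from 768 up to (but not including)
         minsize_for_tree_index(4) == 1024.
       */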
  2290 
  2291 
  2292 /* ------------------------ Operations on bin maps ----------------------- */
  2293 
  2294 /* bit corresponding to given index */
  2295 #define idx2bit(i)              ((binmap_t)(1) << (i))
  2296 
  2297 /* Mark/Clear bits with given index */
  2298 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
  2299 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
  2300 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
  2301 
  2302 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
  2303 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
  2304 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
  2305 
  2306 /* index corresponding to given bit */
  2307 
  2308 #if defined(__GNUC__) && defined(i386)
  2309 #define compute_bit2idx(X, I)\
  2310 {\
  2311   unsigned int J;\
  2312   __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  2313   I = (bindex_t)J;\
  2314 }
  2315 
  2316 #else /* GNUC */
  2317 #if  USE_BUILTIN_FFS
  2318 #define compute_bit2idx(X, I) I = ffs(X)-1
  2319 
  2320 #else /* USE_BUILTIN_FFS */
  2321 #define compute_bit2idx(X, I)\
  2322 {\
  2323   unsigned int Y = X - 1;\
  2324   unsigned int K = Y >> (16-4) & 16;\
  2325   unsigned int N = K;        Y >>= K;\
  2326   N += K = Y >> (8-3) &  8;  Y >>= K;\
  2327   N += K = Y >> (4-2) &  4;  Y >>= K;\
  2328   N += K = Y >> (2-1) &  2;  Y >>= K;\
  2329   N += K = Y >> (1-0) &  1;  Y >>= K;\
  2330   I = (bindex_t)(N + Y);\
  2331 }
  2332 #endif /* USE_BUILTIN_FFS */
  2333 #endif /* GNUC */
  2334 
  2335 /* isolate the least set bit of a bitmap */
  2336 #define least_bit(x)         ((x) & -(x))
  2337 
  2338 /* mask with all bits to left of least bit of x on */
  2339 #define left_bits(x)         ((x<<1) | -(x<<1))
  2340 
  2341 /* mask with all bits to left of or equal to least bit of x on */
  2342 #define same_or_left_bits(x) ((x) | -(x))
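       /*
         A worked example (illustrative): if smallmap == 0x28, bins 3 and 5
         are non-empty, and

             least_bit(0x28)           == 0x08
             compute_bit2idx(0x08, I)  sets I == 3
             left_bits(idx2bit(3))     == 0xFFFFFFF0  (all bins above bin 3)
       */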
  2343 
  2344 
  2345 /* ----------------------- Runtime Check Support ------------------------- */
  2346 
  2347 /*
  2348   For security, the main invariant is that malloc/free/etc never
  2349   writes to a static address other than malloc_state, unless static
  2350   malloc_state itself has been corrupted, which cannot occur via
  2351   malloc (because of these checks). In essence this means that we
  2352   believe all pointers, sizes, maps etc held in malloc_state, but
  2353   check all of those linked or offsetted from other embedded data
  2354   structures.  These checks are interspersed with main code in a way
  2355   that tends to minimize their run-time cost.
  2356 
  2357   When FOOTERS is defined, in addition to range checking, we also
  2358   verify footer fields of inuse chunks, which can be used to guarantee
  2359   that the mstate controlling malloc/free is intact.  This is a
  2360   streamlined version of the approach described by William Robertson
  2361   et al in "Run-time Detection of Heap-based Overflows" LISA'03
  2362   http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  2363   of an inuse chunk holds the xor of its mstate and a random seed,
  2364   that is checked upon calls to free() and realloc().  This is
  2365   (probabilistically) unguessable from outside the program, but can be
  2366   computed by any code successfully malloc'ing any chunk, so does not
  2367   itself provide protection against code that has already broken
  2368   security through some other means.  Unlike Robertson et al, we
  2369   always dynamically check addresses of all offset chunks (previous,
  2370   next, etc). This turns out to be cheaper than relying on hashes.
  2371 */
  2372 
  2373 #if !INSECURE
  2374 /* Check if address a is at least as high as any from MORECORE or MMAP */
  2375 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
  2376 /* Check if address of next chunk n is higher than base chunk p */
  2377 #define ok_next(p, n)    ((char*)(p) < (char*)(n))
  2378 /* Check if p has its cinuse bit on */
  2379 #define ok_cinuse(p)     cinuse(p)
  2380 /* Check if p has its pinuse bit on */
  2381 #define ok_pinuse(p)     pinuse(p)
  2382 
  2383 #else /* !INSECURE */
  2384 #define ok_address(M, a) (1)
  2385 #define ok_next(b, n)    (1)
  2386 #define ok_cinuse(p)     (1)
  2387 #define ok_pinuse(p)     (1)
  2388 #endif /* !INSECURE */
  2389 
  2390 #if (FOOTERS && !INSECURE)
  2391 /* Check if (alleged) mstate m has expected magic field */
  2392 #define ok_magic(M)      ((M)->magic == mparams.magic)
  2393 #else  /* (FOOTERS && !INSECURE) */
  2394 #define ok_magic(M)      (1)
  2395 #endif /* (FOOTERS && !INSECURE) */
  2396 
  2397 
  2398 /* In gcc, use __builtin_expect to minimize impact of checks */
  2399 #if !INSECURE
  2400 #if defined(__GNUC__) && __GNUC__ >= 3
  2401 #define RTCHECK(e)  __builtin_expect(e, 1)
  2402 #else /* GNUC */
  2403 #define RTCHECK(e)  (e)
  2404 #endif /* GNUC */
  2405 #else /* !INSECURE */
  2406 #define RTCHECK(e)  (1)
  2407 #endif /* !INSECURE */
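/* RTCHECK wraps the ok_* predicates at their call sites below, e.g.
   RTCHECK(ok_address(M, F)): under gcc the test is annotated as
   expected-true so the corruption-handling path stays off the fast
   path, and with INSECURE both RTCHECK and the predicates collapse to
   constants so the checks compile away entirely. */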
  2408 
  2409 /* macros to set up inuse chunks with or without footers */
  2410 
  2411 #if !FOOTERS
  2412 
  2413 #define mark_inuse_foot(M,p,s)
  2414 
  2415 /* Set cinuse bit and pinuse bit of next chunk */
  2416 #define set_inuse(M,p,s)\
  2417   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2418   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2419 
  2420 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
  2421 #define set_inuse_and_pinuse(M,p,s)\
  2422   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2423   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2424 
  2425 /* Set size, cinuse and pinuse bit of this chunk */
  2426 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2427   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
  2428 
  2429 #else /* FOOTERS */
  2430 
  2431 /* Set foot of inuse chunk to be xor of mstate and seed */
  2432 #define mark_inuse_foot(M,p,s)\
  2433   (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
  2434 
  2435 #define get_mstate_for(p)\
  2436   ((mstate)(((mchunkptr)((char*)(p) +\
  2437     (chunksize(p))))->prev_foot ^ mparams.magic))
  2438 
  2439 #define set_inuse(M,p,s)\
  2440   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2441   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  2442   mark_inuse_foot(M,p,s))
  2443 
  2444 #define set_inuse_and_pinuse(M,p,s)\
  2445   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2446   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  2447  mark_inuse_foot(M,p,s))
  2448 
  2449 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2450   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2451   mark_inuse_foot(M, p, s))
  2452 
  2453 #endif /* !FOOTERS */
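/*
  Example of the header encoding these macros maintain (PINUSE_BIT and
  CINUSE_BIT are the two low bits of `head`, defined earlier): after
  set_inuse_and_pinuse(m, p, 32), p->head == 32|3 == 0x23, and the
  chunk 32 bytes further on gets its PINUSE_BIT set, recording that its
  predecessor p is in use.  With FOOTERS the same call also stamps p's
  footer via mark_inuse_foot.
*/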
  2454 
  2455 /* ---------------------------- setting mparams -------------------------- */
  2456 
  2457 /* Initialize mparams */
  2458 static int init_mparams(void) {
  2459   if (mparams.page_size == 0) {
  2460     size_t s;
  2461 
  2462     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  2463     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  2464 #if MORECORE_CONTIGUOUS
  2465     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
  2466 #else  /* MORECORE_CONTIGUOUS */
  2467     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
  2468 #endif /* MORECORE_CONTIGUOUS */
  2469 
  2470 #if (FOOTERS && !INSECURE)
  2471     {
  2472 #if USE_DEV_RANDOM
  2473       int fd;
  2474       unsigned char buf[sizeof(size_t)];
  2475       /* Try to use /dev/urandom, else fall back on using time */
  2476       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
  2477           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
  2478         s = *((size_t *) buf);
  2479         close(fd);
  2480       }
  2481       else
  2482 #endif /* USE_DEV_RANDOM */
  2483         s = (size_t)(time(0) ^ (size_t)0x55555555U);
  2484 
  2485       s |= (size_t)8U;    /* ensure nonzero */
  2486       s &= ~(size_t)7U;   /* improve chances of fault for bad values */
  2487 
  2488     }
  2489 #else /* (FOOTERS && !INSECURE) */
  2490     s = (size_t)0x58585858U;
  2491 #endif /* (FOOTERS && !INSECURE) */
  2492     ACQUIRE_MAGIC_INIT_LOCK();
  2493     if (mparams.magic == 0) {
  2494       mparams.magic = s;
  2495       /* Set up lock for main malloc area */
  2496       INITIAL_LOCK(&gm->mutex);
  2497       gm->mflags = mparams.default_mflags;
  2498     }
  2499     RELEASE_MAGIC_INIT_LOCK();
  2500 
  2501 #ifndef WIN32
  2502     mparams.page_size = malloc_getpagesize;
  2503     mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
  2504                            DEFAULT_GRANULARITY : mparams.page_size);
  2505 #else /* WIN32 */
  2506     {
  2507       SYSTEM_INFO system_info;
  2508       GetSystemInfo(&system_info);
  2509       mparams.page_size = system_info.dwPageSize;
  2510       mparams.granularity = system_info.dwAllocationGranularity;
  2511     }
  2512 #endif /* WIN32 */
  2513 
  2514     /* Sanity-check configuration:
  2515        size_t must be unsigned and as wide as pointer type.
  2516        ints must be at least 4 bytes.
  2517        alignment must be at least 8.
  2518        Alignment, min chunk size, and page size must all be powers of 2.
  2519     */
  2520     if ((sizeof(size_t) != sizeof(char*)) ||
  2521         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
  2522         (sizeof(int) < 4)  ||
  2523         (MALLOC_ALIGNMENT < (size_t)8U) ||
  2524         ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
  2525         ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
  2526         ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
  2527         ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
  2528       ABORT;
  2529   }
  2530   return 0;
  2531 }
  2532 
  2533 /* support for mallopt */
  2534 static int change_mparam(int param_number, int value) {
  2535   size_t val = (size_t)value;
  2536   init_mparams();
  2537   switch(param_number) {
  2538   case M_TRIM_THRESHOLD:
  2539     mparams.trim_threshold = val;
  2540     return 1;
  2541   case M_GRANULARITY:
  2542     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
  2543       mparams.granularity = val;
  2544       return 1;
  2545     }
  2546     else
  2547       return 0;
  2548   case M_MMAP_THRESHOLD:
  2549     mparams.mmap_threshold = val;
  2550     return 1;
  2551   default:
  2552     return 0;
  2553   }
  2554 }
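/*
  Usage sketch for the mallopt-style tuning that change_mparam backs
  (the public wrapper simply forwards here; its exported name depends
  on the dlmalloc prefix settings):
*/
#if 0   /* illustrative only, not compiled */
static void example_tuning(void) {
  change_mparam(M_GRANULARITY, 256 * 1024); /* ok: power of 2 >= page size */
  change_mparam(M_MMAP_THRESHOLD, 1 << 20); /* mmap requests of 1MB and up */
  change_mparam(M_TRIM_THRESHOLD, -1);      /* (size_t)-1: never auto-trim */
  /* each call returns 1 on success, 0 if the parameter or value is rejected */
}
#endif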
  2555 
  2556 #if DEBUG
  2557 /* ------------------------- Debugging Support --------------------------- */
  2558 
  2559 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
  2560 static void do_check_any_chunk(mstate m, mchunkptr p) {
  2561   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2562   assert(ok_address(m, p));
  2563 }
  2564 
  2565 /* Check properties of top chunk */
  2566 static void do_check_top_chunk(mstate m, mchunkptr p) {
  2567   msegmentptr sp = segment_holding(m, (char*)p);
  2568   size_t  sz = chunksize(p);
  2569   assert(sp != 0);
  2570   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2571   assert(ok_address(m, p));
  2572   assert(sz == m->topsize);
  2573   assert(sz > 0);
  2574   assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  2575   assert(pinuse(p));
  2576   assert(!next_pinuse(p));
  2577 }
  2578 
  2579 /* Check properties of (inuse) mmapped chunks */
  2580 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  2581   size_t  sz = chunksize(p);
  2582   size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  2583   assert(is_mmapped(p));
  2584   assert(use_mmap(m));
  2585   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2586   assert(ok_address(m, p));
  2587   assert(!is_small(sz));
  2588   assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  2589   assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  2590   assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
  2591 }
  2592 
  2593 /* Check properties of inuse chunks */
  2594 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  2595   do_check_any_chunk(m, p);
  2596   assert(cinuse(p));
  2597   assert(next_pinuse(p));
  2598   /* If not pinuse and not mmapped, previous chunk has OK offset */
  2599   assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  2600   if (is_mmapped(p))
  2601     do_check_mmapped_chunk(m, p);
  2602 }
  2603 
  2604 /* Check properties of free chunks */
  2605 static void do_check_free_chunk(mstate m, mchunkptr p) {
  2606   size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2607   mchunkptr next = chunk_plus_offset(p, sz);
  2608   do_check_any_chunk(m, p);
  2609   assert(!cinuse(p));
  2610   assert(!next_pinuse(p));
  2611   assert (!is_mmapped(p));
  2612   if (p != m->dv && p != m->top) {
  2613     if (sz >= MIN_CHUNK_SIZE) {
  2614       assert((sz & CHUNK_ALIGN_MASK) == 0);
  2615       assert(is_aligned(chunk2mem(p)));
  2616       assert(next->prev_foot == sz);
  2617       assert(pinuse(p));
  2618       assert (next == m->top || cinuse(next));
  2619       assert(p->fd->bk == p);
  2620       assert(p->bk->fd == p);
  2621     }
  2622     else  /* markers are always of size SIZE_T_SIZE */
  2623       assert(sz == SIZE_T_SIZE);
  2624   }
  2625 }
  2626 
  2627 /* Check properties of malloced chunks at the point they are malloced */
  2628 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  2629   if (mem != 0) {
  2630     mchunkptr p = mem2chunk(mem);
  2631     size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2632     do_check_inuse_chunk(m, p);
  2633     assert((sz & CHUNK_ALIGN_MASK) == 0);
  2634     assert(sz >= MIN_CHUNK_SIZE);
  2635     assert(sz >= s);
  2636     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
  2637     assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  2638   }
  2639 }
  2640 
  2641 /* Check a tree and its subtrees.  */
  2642 static void do_check_tree(mstate m, tchunkptr t) {
  2643   tchunkptr head = 0;
  2644   tchunkptr u = t;
  2645   bindex_t tindex = t->index;
  2646   size_t tsize = chunksize(t);
  2647   bindex_t idx;
  2648   compute_tree_index(tsize, idx);
  2649   assert(tindex == idx);
  2650   assert(tsize >= MIN_LARGE_SIZE);
  2651   assert(tsize >= minsize_for_tree_index(idx));
  2652   assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
  2653 
  2654   do { /* traverse through chain of same-sized nodes */
  2655     do_check_any_chunk(m, ((mchunkptr)u));
  2656     assert(u->index == tindex);
  2657     assert(chunksize(u) == tsize);
  2658     assert(!cinuse(u));
  2659     assert(!next_pinuse(u));
  2660     assert(u->fd->bk == u);
  2661     assert(u->bk->fd == u);
  2662     if (u->parent == 0) {
  2663       assert(u->child[0] == 0);
  2664       assert(u->child[1] == 0);
  2665     }
  2666     else {
  2667       assert(head == 0); /* only one node on chain has parent */
  2668       head = u;
  2669       assert(u->parent != u);
  2670       assert (u->parent->child[0] == u ||
  2671               u->parent->child[1] == u ||
  2672               *((tbinptr*)(u->parent)) == u);
  2673       if (u->child[0] != 0) {
  2674         assert(u->child[0]->parent == u);
  2675         assert(u->child[0] != u);
  2676         do_check_tree(m, u->child[0]);
  2677       }
  2678       if (u->child[1] != 0) {
  2679         assert(u->child[1]->parent == u);
  2680         assert(u->child[1] != u);
  2681         do_check_tree(m, u->child[1]);
  2682       }
  2683       if (u->child[0] != 0 && u->child[1] != 0) {
  2684         assert(chunksize(u->child[0]) < chunksize(u->child[1]));
  2685       }
  2686     }
  2687     u = u->fd;
  2688   } while (u != t);
  2689   assert(head != 0);
  2690 }
  2691 
  2692 /*  Check all the chunks in a treebin.  */
  2693 static void do_check_treebin(mstate m, bindex_t i) {
  2694   tbinptr* tb = treebin_at(m, i);
  2695   tchunkptr t = *tb;
  2696   int empty = (m->treemap & (1U << i)) == 0;
  2697   if (t == 0)
  2698     assert(empty);
  2699   if (!empty)
  2700     do_check_tree(m, t);
  2701 }
  2702 
  2703 /*  Check all the chunks in a smallbin.  */
  2704 static void do_check_smallbin(mstate m, bindex_t i) {
  2705   sbinptr b = smallbin_at(m, i);
  2706   mchunkptr p = b->bk;
  2707   unsigned int empty = (m->smallmap & (1U << i)) == 0;
  2708   if (p == b)
  2709     assert(empty);
  2710   if (!empty) {
  2711     for (; p != b; p = p->bk) {
  2712       size_t size = chunksize(p);
  2713       mchunkptr q;
  2714       /* each chunk claims to be free */
  2715       do_check_free_chunk(m, p);
  2716       /* chunk belongs in bin */
  2717       assert(small_index(size) == i);
  2718       assert(p->bk == b || chunksize(p->bk) == chunksize(p));
  2719       /* chunk is followed by an inuse chunk */
  2720       q = next_chunk(p);
  2721       if (q->head != FENCEPOST_HEAD)
  2722         do_check_inuse_chunk(m, q);
  2723     }
  2724   }
  2725 }
  2726 
  2727 /* Find x in a bin. Used in other check functions. */
  2728 static int bin_find(mstate m, mchunkptr x) {
  2729   size_t size = chunksize(x);
  2730   if (is_small(size)) {
  2731     bindex_t sidx = small_index(size);
  2732     sbinptr b = smallbin_at(m, sidx);
  2733     if (smallmap_is_marked(m, sidx)) {
  2734       mchunkptr p = b;
  2735       do {
  2736         if (p == x)
  2737           return 1;
  2738       } while ((p = p->fd) != b);
  2739     }
  2740   }
  2741   else {
  2742     bindex_t tidx;
  2743     compute_tree_index(size, tidx);
  2744     if (treemap_is_marked(m, tidx)) {
  2745       tchunkptr t = *treebin_at(m, tidx);
  2746       size_t sizebits = size << leftshift_for_tree_index(tidx);
  2747       while (t != 0 && chunksize(t) != size) {
  2748         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  2749         sizebits <<= 1;
  2750       }
  2751       if (t != 0) {
  2752         tchunkptr u = t;
  2753         do {
  2754           if (u == (tchunkptr)x)
  2755             return 1;
  2756         } while ((u = u->fd) != t);
  2757       }
  2758     }
  2759   }
  2760   return 0;
  2761 }
  2762 
  2763 /* Traverse each chunk and check it; return total */
  2764 static size_t traverse_and_check(mstate m) {
  2765   size_t sum = 0;
  2766   if (is_initialized(m)) {
  2767     msegmentptr s = &m->seg;
  2768     sum += m->topsize + TOP_FOOT_SIZE;
  2769     while (s != 0) {
  2770       mchunkptr q = align_as_chunk(s->base);
  2771       mchunkptr lastq = 0;
  2772       assert(pinuse(q));
  2773       while (segment_holds(s, q) &&
  2774              q != m->top && q->head != FENCEPOST_HEAD) {
  2775         sum += chunksize(q);
  2776         if (cinuse(q)) {
  2777           assert(!bin_find(m, q));
  2778           do_check_inuse_chunk(m, q);
  2779         }
  2780         else {
  2781           assert(q == m->dv || bin_find(m, q));
  2782           assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
  2783           do_check_free_chunk(m, q);
  2784         }
  2785         lastq = q;
  2786         q = next_chunk(q);
  2787       }
  2788       s = s->next;
  2789     }
  2790   }
  2791   return sum;
  2792 }
  2793 
  2794 /* Check all properties of malloc_state. */
  2795 static void do_check_malloc_state(mstate m) {
  2796   bindex_t i;
  2797   size_t total;
  2798   /* check bins */
  2799   for (i = 0; i < NSMALLBINS; ++i)
  2800     do_check_smallbin(m, i);
  2801   for (i = 0; i < NTREEBINS; ++i)
  2802     do_check_treebin(m, i);
  2803 
  2804   if (m->dvsize != 0) { /* check dv chunk */
  2805     do_check_any_chunk(m, m->dv);
  2806     assert(m->dvsize == chunksize(m->dv));
  2807     assert(m->dvsize >= MIN_CHUNK_SIZE);
  2808     assert(bin_find(m, m->dv) == 0);
  2809   }
  2810 
  2811   if (m->top != 0) {   /* check top chunk */
  2812     do_check_top_chunk(m, m->top);
  2813     assert(m->topsize == chunksize(m->top));
  2814     assert(m->topsize > 0);
  2815     assert(bin_find(m, m->top) == 0);
  2816   }
  2817 
  2818   total = traverse_and_check(m);
  2819   assert(total <= m->footprint);
  2820   assert(m->footprint <= m->max_footprint);
  2821 }
  2822 #endif /* DEBUG */
  2823 
  2824 /* ----------------------------- statistics ------------------------------ */
  2825 
  2826 #if !NO_MALLINFO
  2827 static struct mallinfo internal_mallinfo(mstate m) {
  2828   struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  2829   if (!PREACTION(m)) {
  2830     check_malloc_state(m);
  2831     if (is_initialized(m)) {
  2832       size_t nfree = SIZE_T_ONE; /* top always free */
  2833       size_t mfree = m->topsize + TOP_FOOT_SIZE;
  2834       size_t sum = mfree;
  2835       msegmentptr s = &m->seg;
  2836       while (s != 0) {
  2837         mchunkptr q = align_as_chunk(s->base);
  2838         while (segment_holds(s, q) &&
  2839                q != m->top && q->head != FENCEPOST_HEAD) {
  2840           size_t sz = chunksize(q);
  2841           sum += sz;
  2842           if (!cinuse(q)) {
  2843             mfree += sz;
  2844             ++nfree;
  2845           }
  2846           q = next_chunk(q);
  2847         }
  2848         s = s->next;
  2849       }
  2850 
  2851       nm.arena    = sum;
  2852       nm.ordblks  = nfree;
  2853       nm.hblkhd   = m->footprint - sum;
  2854       nm.usmblks  = m->max_footprint;
  2855       nm.uordblks = m->footprint - mfree;
  2856       nm.fordblks = mfree;
  2857       nm.keepcost = m->topsize;
  2858     }
  2859 
  2860     POSTACTION(m);
  2861   }
  2862   return nm;
  2863 }
  2864 #endif /* !NO_MALLINFO */
  2865 
  2866 static void internal_malloc_stats(mstate m) {
  2867   if (!PREACTION(m)) {
  2868     size_t maxfp = 0;
  2869     size_t fp = 0;
  2870     size_t used = 0;
  2871     check_malloc_state(m);
  2872     if (is_initialized(m)) {
  2873       msegmentptr s = &m->seg;
  2874       maxfp = m->max_footprint;
  2875       fp = m->footprint;
  2876       used = fp - (m->topsize + TOP_FOOT_SIZE);
  2877 
  2878       while (s != 0) {
  2879         mchunkptr q = align_as_chunk(s->base);
  2880         while (segment_holds(s, q) &&
  2881                q != m->top && q->head != FENCEPOST_HEAD) {
  2882           if (!cinuse(q))
  2883             used -= chunksize(q);
  2884           q = next_chunk(q);
  2885         }
  2886         s = s->next;
  2887       }
  2888     }
  2889 
  2890 #ifndef LACKS_STDIO_H
  2891     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
  2892     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
  2893     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
  2894 #endif
  2895 
  2896     POSTACTION(m);
  2897   }
  2898 }
  2899 
  2900 /* ----------------------- Operations on smallbins ----------------------- */
  2901 
  2902 /*
  2903   Various forms of linking and unlinking are defined as macros.  Even
  2904   the ones for trees, which are very long but have very short typical
  2905   paths.  This is ugly but reduces reliance on inlining support of
  2906   compilers.
  2907 */
  2908 
  2909 /* Link a free chunk into a smallbin  */
  2910 #define insert_small_chunk(M, P, S) {\
  2911   bindex_t I  = small_index(S);\
  2912   mchunkptr B = smallbin_at(M, I);\
  2913   mchunkptr F = B;\
  2914   assert(S >= MIN_CHUNK_SIZE);\
  2915   if (!smallmap_is_marked(M, I))\
  2916     mark_smallmap(M, I);\
  2917   else if (RTCHECK(ok_address(M, B->fd)))\
  2918     F = B->fd;\
  2919   else {\
  2920     CORRUPTION_ERROR_ACTION(M);\
  2921   }\
  2922   B->fd = P;\
  2923   F->bk = P;\
  2924   P->fd = F;\
  2925   P->bk = B;\
  2926 }
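/*
  Worked example, assuming the usual small_index(S) == S >> 3 from
  earlier in this file: a freed 40-byte chunk reaches this macro with
  S == 40, so I == 5.  P is pushed at the front of smallbin 5's
  circular list (B->fd and the old front chunk's bk both end up
  pointing at P), and smallmap bit 5 is set so malloc can find the bin
  with the bitmap helpers above.
*/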
  2927 
  2928 /* Unlink a chunk from a smallbin  */
  2929 #define unlink_small_chunk(M, P, S) {\
  2930   mchunkptr F = P->fd;\
  2931   mchunkptr B = P->bk;\
  2932   bindex_t I = small_index(S);\
  2933   assert(P != B);\
  2934   assert(P != F);\
  2935   assert(chunksize(P) == small_index2size(I));\
  2936   if (F == B)\
  2937     clear_smallmap(M, I);\
  2938   else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
  2939                    (B == smallbin_at(M,I) || ok_address(M, B)))) {\
  2940     F->bk = B;\
  2941     B->fd = F;\
  2942   }\
  2943   else {\
  2944     CORRUPTION_ERROR_ACTION(M);\
  2945   }\
  2946 }
  2947 
  2948 /* Unlink the first chunk from a smallbin */
  2949 #define unlink_first_small_chunk(M, B, P, I) {\
  2950   mchunkptr F = P->fd;\
  2951   assert(P != B);\
  2952   assert(P != F);\
  2953   assert(chunksize(P) == small_index2size(I));\
  2954   if (B == F)\
  2955     clear_smallmap(M, I);\
  2956   else if (RTCHECK(ok_address(M, F))) {\
  2957     B->fd = F;\
  2958     F->bk = B;\
  2959   }\
  2960   else {\
  2961     CORRUPTION_ERROR_ACTION(M);\
  2962   }\
  2963 }
  2964 
  2965 /* Replace dv node, binning the old one */
  2966 /* Used only when dvsize known to be small */
  2967 #define replace_dv(M, P, S) {\
  2968   size_t DVS = M->dvsize;\
  2969   if (DVS != 0) {\
  2970     mchunkptr DV = M->dv;\
  2971     assert(is_small(DVS));\
  2972     insert_small_chunk(M, DV, DVS);\
  2973   }\
  2974   M->dvsize = S;\
  2975   M->dv = P;\
  2976 }
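/* "dv" is dlmalloc's designated victim: the preferred chunk for
   servicing requests with no exact fit, typically the remainder of the
   most recent small split.  replace_dv re-bins the previous victim
   (always small on this path, per the assert) before making P the new
   one. */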
  2977 
  2978 /* ------------------------- Operations on trees ------------------------- */
  2979 
  2980 /* Insert chunk into tree */
  2981 #define insert_large_chunk(M, X, S) {\
  2982   tbinptr* H;\
  2983   bindex_t I;\
  2984   compute_tree_index(S, I);\
  2985   H = treebin_at(M, I);\
  2986   X->index = I;\
  2987   X->child[0] = X->child[1] = 0;\
  2988   if (!treemap_is_marked(M, I)) {\
  2989     mark_treemap(M, I);\
  2990     *H = X;\
  2991     X->parent = (tchunkptr)H;\
  2992     X->fd = X->bk = X;\
  2993   }\
  2994   else {\
  2995     tchunkptr T = *H;\
  2996     size_t K = S << leftshift_for_tree_index(I);\
  2997     for (;;) {\
  2998       if (chunksize(T) != S) {\
  2999         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
  3000         K <<= 1;\
  3001         if (*C != 0)\
  3002           T = *C;\
  3003         else if (RTCHECK(ok_address(M, C))) {\
  3004           *C = X;\
  3005           X->parent = T;\
  3006           X->fd = X->bk = X;\
  3007           break;\
  3008         }\
  3009         else {\
  3010           CORRUPTION_ERROR_ACTION(M);\
  3011           break;\
  3012         }\
  3013       }\
  3014       else {\
  3015         tchunkptr F = T->fd;\
  3016         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
  3017           T->fd = F->bk = X;\
  3018           X->fd = F;\
  3019           X->bk = T;\
  3020           X->parent = 0;\
  3021           break;\
  3022         }\
  3023         else {\
  3024           CORRUPTION_ERROR_ACTION(M);\
  3025           break;\
  3026         }\
  3027       }\
  3028     }\
  3029   }\
  3030 }
  3031 
  3032 /*
  3033   Unlink steps:
  3034 
  3035   1. If x is a chained node, unlink it from its same-sized fd/bk links
  3036      and choose its bk node as its replacement.
  3037   2. If x was the last node of its size, but not a leaf node, it must
  3038      be replaced with a leaf node (not merely one with an open left or
  3039      right), to make sure that lefts and rights of descendants
  3040      correspond properly to bit masks.  We use the rightmost descendant
  3041      of x.  We could use any other leaf, but this is easy to locate and
  3042      tends to counteract removal of leftmosts elsewhere, and so keeps
  3043      paths shorter than minimally guaranteed.  This doesn't loop much
  3044      because on average a node in a tree is near the bottom.
  3045   3. If x is the base of a chain (i.e., has parent links) relink
  3046      x's parent and children to x's replacement (or null if none).
  3047 */
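/* In the macro below: X is the node being removed, XP its parent, and
   R the replacement chosen by steps 1-2 above (X->bk when X is a
   chained node, otherwise the rightmost descendant located via RP/CP,
   or 0 if X is a childless last node).  Step 3 is the final
   "if (XP != 0)" block, which splices R into X's place or clears the
   treemap bit when the bin empties. */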
  3048 
  3049 #define unlink_large_chunk(M, X) {\
  3050   tchunkptr XP = X->parent;\
  3051   tchunkptr R;\
  3052   if (X->bk != X) {\
  3053     tchunkptr F = X->fd;\
  3054     R = X->bk;\
  3055     if (RTCHECK(ok_address(M, F))) {\
  3056       F->bk = R;\
  3057       R->fd = F;\
  3058     }\
  3059     else {\
  3060       CORRUPTION_ERROR_ACTION(M);\
  3061     }\
  3062   }\
  3063   else {\
  3064     tchunkptr* RP;\
  3065     if (((R = *(RP = &(X->child[1]))) != 0) ||\
  3066         ((R = *(RP = &(X->child[0]))) != 0)) {\
  3067       tchunkptr* CP;\
  3068       while ((*(CP = &(R->child[1])) != 0) ||\
  3069              (*(CP = &(R->child[0])) != 0)) {\
  3070         R = *(RP = CP);\
  3071       }\
  3072       if (RTCHECK(ok_address(M, RP)))\
  3073         *RP = 0;\
  3074       else {\
  3075         CORRUPTION_ERROR_ACTION(M);\
  3076       }\
  3077     }\
  3078   }\
  3079   if (XP != 0) {\
  3080     tbinptr* H = treebin_at(M, X->index);\
  3081     if (X == *H) {\
  3082       if ((*H = R) == 0) \
  3083         clear_treemap(M, X->index);\
  3084     }\
  3085     else if (RTCHECK(ok_address(M, XP))) {\
  3086       if (XP->child[0] == X) \
  3087         XP->child[0] = R;\
  3088       else \
  3089         XP->child[1] = R;\
  3090     }\
  3091     else\
  3092       CORRUPTION_ERROR_ACTION(M);\
  3093     if (R != 0) {\
  3094       if (RTCHECK(ok_address(M, R))) {\
  3095         tchunkptr C0, C1;\
  3096         R->parent = XP;\
  3097         if ((C0 = X->child[0]) != 0) {\
  3098           if (RTCHECK(ok_address(M, C0))) {\
  3099             R->child[0] = C0;\
  3100             C0->parent = R;\
  3101           }\
  3102           else\
  3103             CORRUPTION_ERROR_ACTION(M);\
  3104         }\
  3105         if ((C1 = X->child[1]) != 0) {\
  3106           if (RTCHECK(ok_address(M, C1))) {\
  3107             R->child[1] = C1;\
  3108             C1->parent = R;\
  3109           }\
  3110           else\
  3111             CORRUPTION_ERROR_ACTION(M);\
  3112         }\
  3113       }\
  3114       else\
  3115         CORRUPTION_ERROR_ACTION(M);\
  3116     }\
  3117   }\
  3118 }
  3119 
  3120 /* Relays to large vs small bin operations */
  3121 
  3122 #define insert_chunk(M, P, S)\
  3123   if (is_small(S)) insert_small_chunk(M, P, S)\
  3124   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
  3125 
  3126 #define unlink_chunk(M, P, S)\
  3127   if (is_small(S)) unlink_small_chunk(M, P, S)\
  3128   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
  3129 
  3130 
  3131 /* Relays to internal calls to malloc/free from realloc, memalign etc */
  3132 
  3133 #if ONLY_MSPACES
  3134 #define internal_malloc(m, b) mspace_malloc(m, b)
  3135 #define internal_free(m, mem) mspace_free(m,mem);
  3136 #else /* ONLY_MSPACES */
  3137 #if MSPACES
  3138 #define internal_malloc(m, b)\
  3139    (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
  3140 #define internal_free(m, mem)\
  3141    if (m == gm) dlfree(mem); else mspace_free(m,mem);
  3142 #else /* MSPACES */
  3143 #define internal_malloc(m, b) dlmalloc(b)
  3144 #define internal_free(m, mem) dlfree(mem)
  3145 #endif /* MSPACES */
  3146 #endif /* ONLY_MSPACES */
  3147 
  3148 /* -----------------------  Direct-mmapping chunks ----------------------- */
  3149 
  3150 /*
  3151   Directly mmapped chunks are set up with an offset to the start of
  3152   the mmapped region stored in the prev_foot field of the chunk. This
  3153   allows reconstruction of the required argument to MUNMAP when freed,
  3154   and also allows adjustment of the returned chunk to meet alignment
  3155   requirements (especially in memalign).  There is also enough space
  3156   allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
  3157   the PINUSE bit so frees can be checked.
  3158 */
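/*
  Layout sketch of a directly mmapped chunk as built by mmap_alloc
  below, where `offset` is the alignment slack at the start of the
  mapping:

    mm ................................ start of the mmapped region
    p = mm + offset ................... prev_foot = offset|IS_MMAPPED_BIT,
                                        head = psize|CINUSE_BIT
    (char*)p + psize .................. fake chunk, head == FENCEPOST_HEAD
    (char*)p + psize + SIZE_T_SIZE .... trailing zero head

  On free, the original mapping is recovered as base = (char*)p - offset
  and length = psize + offset + MMAP_FOOT_PAD, which is what
  do_check_mmapped_chunk verifies above.
*/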
  3159 
  3160 /* Malloc using mmap */
  3161 static void* mmap_alloc(mstate m, size_t nb) {
  3162   size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3163   if (mmsize > nb) {     /* Check for wrap around 0 */
  3164     char* mm = (char*)(DIRECT_MMAP(mmsize));
  3165     if (mm != CMFAIL) {
  3166       size_t offset = align_offset(chunk2mem(mm));
  3167       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  3168       mchunkptr p = (mchunkptr)(mm + offset);
  3169       p->prev_foot = offset | IS_MMAPPED_BIT;
  3170       (p)->head = (psize|CINUSE_BIT);
  3171       mark_inuse_foot(m, p, psize);
  3172       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  3173       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
  3174 
  3175       if (mm < m->least_addr)
  3176         m->least_addr = mm;
  3177       if ((m->footprint += mmsize) > m->max_footprint)
  3178         m->max_footprint = m->footprint;
  3179       assert(is_aligned(chunk2mem(p)));
  3180       check_mmapped_chunk(m, p);
  3181       return chunk2mem(p);
  3182     }
  3183   }
  3184   return 0;
  3185 }
  3186 
  3187 /* Realloc using mmap */
  3188 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
  3189   size_t oldsize = chunksize(oldp);
  3190   if (is_small(nb)) /* Can't shrink mmap regions below small size */
  3191     return 0;
  3192   /* Keep old chunk if big enough but not too big */
  3193   if (oldsize >= nb + SIZE_T_SIZE &&
  3194       (oldsize - nb) <= (mparams.granularity << 1))
  3195     return oldp;
  3196   else {
  3197     size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
  3198     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
  3199     size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
  3200                                          CHUNK_ALIGN_MASK);
  3201     char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
  3202                                   oldmmsize, newmmsize, 1);
  3203     if (cp != CMFAIL) {
  3204       mchunkptr newp = (mchunkptr)(cp + offset);
  3205       size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
  3206       newp->head = (psize|CINUSE_BIT);
  3207       mark_inuse_foot(m, newp, psize);
  3208       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
  3209       chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
  3210 
  3211       if (cp < m->least_addr)
  3212         m->least_addr = cp;
  3213       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
  3214         m->max_footprint = m->footprint;
  3215       check_mmapped_chunk(m, newp);
  3216       return newp;
  3217     }
  3218   }
  3219   return 0;
  3220 }
  3221 
  3222 /* -------------------------- mspace management -------------------------- */
  3223 
  3224 /* Initialize top chunk and its size */
  3225 static void init_top(mstate m, mchunkptr p, size_t psize) {
  3226   /* Ensure alignment */
  3227   size_t offset = align_offset(chunk2mem(p));
  3228   p = (mchunkptr)((char*)p + offset);
  3229   psize -= offset;
  3230 
  3231   m->top = p;
  3232   m->topsize = psize;
  3233   p->head = psize | PINUSE_BIT;
  3234   /* set size of fake trailing chunk holding overhead space only once */
  3235   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  3236   m->trim_check = mparams.trim_threshold; /* reset on each update */
  3237 }
  3238 
  3239 /* Initialize bins for a new mstate that is otherwise zeroed out */
  3240 static void init_bins(mstate m) {
  3241   /* Establish circular links for smallbins */
  3242   bindex_t i;
  3243   for (i = 0; i < NSMALLBINS; ++i) {
  3244     sbinptr bin = smallbin_at(m,i);
  3245     bin->fd = bin->bk = bin;
  3246   }
  3247 }
  3248 
  3249 #if PROCEED_ON_ERROR
  3250 
  3251 /* default corruption action */
  3252 static void reset_on_error(mstate m) {
  3253   int i;
  3254   ++malloc_corruption_error_count;
  3255   /* Reinitialize fields to forget about all memory */
  3256   m->smallbins = m->treebins = 0;
  3257   m->dvsize = m->topsize = 0;
  3258   m->seg.base = 0;
  3259   m->seg.size = 0;
  3260   m->seg.next = 0;
  3261   m->top = m->dv = 0;
  3262   for (i = 0; i < NTREEBINS; ++i)
  3263     *treebin_at(m, i) = 0;
  3264   init_bins(m);
  3265 }
  3266 #endif /* PROCEED_ON_ERROR */
  3267 
  3268 /* Allocate chunk and prepend remainder with chunk in successor base. */
  3269 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
  3270                            size_t nb) {
  3271   mchunkptr p = align_as_chunk(newbase);
  3272   mchunkptr oldfirst = align_as_chunk(oldbase);
  3273   size_t psize = (char*)oldfirst - (char*)p;
  3274   mchunkptr q = chunk_plus_offset(p, nb);
  3275   size_t qsize = psize - nb;
  3276   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3277 
  3278   assert((char*)oldfirst > (char*)q);
  3279   assert(pinuse(oldfirst));
  3280   assert(qsize >= MIN_CHUNK_SIZE);
  3281 
  3282   /* consolidate remainder with first chunk of old base */
  3283   if (oldfirst == m->top) {
  3284     size_t tsize = m->topsize += qsize;
  3285     m->top = q;
  3286     q->head = tsize | PINUSE_BIT;
  3287     check_top_chunk(m, q);
  3288   }
  3289   else if (oldfirst == m->dv) {
  3290     size_t dsize = m->dvsize += qsize;
  3291     m->dv = q;
  3292     set_size_and_pinuse_of_free_chunk(q, dsize);
  3293   }
  3294   else {
  3295     if (!cinuse(oldfirst)) {
  3296       size_t nsize = chunksize(oldfirst);
  3297       unlink_chunk(m, oldfirst, nsize);
  3298       oldfirst = chunk_plus_offset(oldfirst, nsize);
  3299       qsize += nsize;
  3300     }
  3301     set_free_with_pinuse(q, qsize, oldfirst);
  3302     insert_chunk(m, q, qsize);
  3303     check_free_chunk(m, q);
  3304   }
  3305 
  3306   check_malloced_chunk(m, chunk2mem(p), nb);
  3307   return chunk2mem(p);
  3308 }
  3309 
  3310 
  3311 /* Add a segment to hold a new noncontiguous region */
  3312 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  3313   /* Determine locations and sizes of segment, fenceposts, old top */
  3314   char* old_top = (char*)m->top;
  3315   msegmentptr oldsp = segment_holding(m, old_top);
  3316   char* old_end = oldsp->base + oldsp->size;
  3317   size_t ssize = pad_request(sizeof(struct malloc_segment));
  3318   char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3319   size_t offset = align_offset(chunk2mem(rawsp));
  3320   char* asp = rawsp + offset;
  3321   char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  3322   mchunkptr sp = (mchunkptr)csp;
  3323   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  3324   mchunkptr tnext = chunk_plus_offset(sp, ssize);
  3325   mchunkptr p = tnext;
  3326   int nfences = 0;
  3327 
  3328   /* reset top to new space */
  3329   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3330 
  3331   /* Set up segment record */
  3332   assert(is_aligned(ss));
  3333   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  3334   *ss = m->seg; /* Push current record */
  3335   m->seg.base = tbase;
  3336   m->seg.size = tsize;
  3337   m->seg.sflags = mmapped;
  3338   m->seg.next = ss;
  3339 
  3340   /* Insert trailing fenceposts */
  3341   for (;;) {
  3342     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  3343     p->head = FENCEPOST_HEAD;
  3344     ++nfences;
  3345     if ((char*)(&(nextp->head)) < old_end)
  3346       p = nextp;
  3347     else
  3348       break;
  3349   }
  3350   assert(nfences >= 2);
  3351 
  3352   /* Insert the rest of old top into a bin as an ordinary free chunk */
  3353   if (csp != old_top) {
  3354     mchunkptr q = (mchunkptr)old_top;
  3355     size_t psize = csp - old_top;
  3356     mchunkptr tn = chunk_plus_offset(q, psize);
  3357     set_free_with_pinuse(q, psize, tn);
  3358     insert_chunk(m, q, psize);
  3359   }
  3360 
  3361   check_top_chunk(m, m->top);
  3362 }
  3363 
  3364 /* -------------------------- System allocation -------------------------- */
  3365 
  3366 /* Get memory from system using MORECORE or MMAP */
  3367 static void* sys_alloc(mstate m, size_t nb) {
  3368   char* tbase = CMFAIL;
  3369   size_t tsize = 0;
  3370   flag_t mmap_flag = 0;
  3371 
  3372   init_mparams();
  3373 
  3374   /* Directly map large chunks */
  3375   if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  3376     void* mem = mmap_alloc(m, nb);
  3377     if (mem != 0)
  3378       return mem;
  3379   }
  3380 
  3381   /*
  3382     Try getting memory in any of three ways (in most-preferred to
  3383     least-preferred order):
  3384     1. A call to MORECORE that can normally contiguously extend memory.
  3385        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
  3386        main space is mmapped or a previous contiguous call failed)
  3387     2. A call to MMAP new space (disabled if not HAVE_MMAP).
  3388        Note that under the default settings, if MORECORE is unable to
  3389        fulfill a request, and HAVE_MMAP is true, then mmap is
  3390        used as a noncontiguous system allocator. This is a useful backup
  3391        strategy for systems with holes in address spaces -- in this case
  3392        sbrk cannot contiguously expand the heap, but mmap may be able to
  3393        find space.
  3394     3. A call to MORECORE that cannot usually contiguously extend memory.
  3395        (disabled if not HAVE_MORECORE)
  3396   */
  3397 
  3398   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  3399     char* br = CMFAIL;
  3400     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
  3401     size_t asize = 0;
  3402     ACQUIRE_MORECORE_LOCK();
  3403 
  3404     if (ss == 0) {  /* First time through or recovery */
  3405       char* base = (char*)CALL_MORECORE(0);
  3406       if (base != CMFAIL) {
  3407         asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3408         /* Adjust to end on a page boundary */
  3409         if (!is_page_aligned(base))
  3410           asize += (page_align((size_t)base) - (size_t)base);
  3411         /* Can't call MORECORE if size is negative when treated as signed */
  3412         if (asize < HALF_MAX_SIZE_T &&
  3413             (br = (char*)(CALL_MORECORE(asize))) == base) {
  3414           tbase = base;
  3415           tsize = asize;
  3416         }
  3417       }
  3418     }
  3419     else {
  3420       /* Subtract out existing available top space from MORECORE request. */
  3421       asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
  3422       /* Use mem here only if it did contiguously extend old space */
  3423       if (asize < HALF_MAX_SIZE_T &&
  3424           (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
  3425         tbase = br;
  3426         tsize = asize;
  3427       }
  3428     }
  3429 
  3430     if (tbase == CMFAIL) {    /* Cope with partial failure */
  3431       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
  3432         if (asize < HALF_MAX_SIZE_T &&
  3433             asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  3434           size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
  3435           if (esize < HALF_MAX_SIZE_T) {
  3436             char* end = (char*)CALL_MORECORE(esize);
  3437             if (end != CMFAIL)
  3438               asize += esize;
  3439             else {            /* Can't use; try to release */
  3440               end = (char*)CALL_MORECORE(-asize);
  3441               br = CMFAIL;
  3442             }
  3443           }
  3444         }
  3445       }
  3446       if (br != CMFAIL) {    /* Use the space we did get */
  3447         tbase = br;
  3448         tsize = asize;
  3449       }
  3450       else
  3451         disable_contiguous(m); /* Don't try contiguous path in the future */
  3452     }
  3453 
  3454     RELEASE_MORECORE_LOCK();
  3455   }
  3456 
  3457   if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
  3458     size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
  3459     size_t rsize = granularity_align(req);
  3460     if (rsize > nb) { /* Fail if wraps around zero */
  3461       char* mp = (char*)(CALL_MMAP(rsize));
  3462       if (mp != CMFAIL) {
  3463         tbase = mp;
  3464         tsize = rsize;
  3465         mmap_flag = IS_MMAPPED_BIT;
  3466       }
  3467     }
  3468   }
  3469 
  3470   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
  3471     size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3472     if (asize < HALF_MAX_SIZE_T) {
  3473       char* br = CMFAIL;
  3474       char* end = CMFAIL;
  3475       ACQUIRE_MORECORE_LOCK();
  3476       br = (char*)(CALL_MORECORE(asize));
  3477       end = (char*)(CALL_MORECORE(0));
  3478       RELEASE_MORECORE_LOCK();
  3479       if (br != CMFAIL && end != CMFAIL && br < end) {
  3480         size_t ssize = end - br;
  3481         if (ssize > nb + TOP_FOOT_SIZE) {
  3482           tbase = br;
  3483           tsize = ssize;
  3484         }
  3485       }
  3486     }
  3487   }
  3488 
  3489   if (tbase != CMFAIL) {
  3490 
  3491     if ((m->footprint += tsize) > m->max_footprint)
  3492       m->max_footprint = m->footprint;
  3493 
  3494     if (!is_initialized(m)) { /* first-time initialization */
  3495       m->seg.base = m->least_addr = tbase;
  3496       m->seg.size = tsize;
  3497       m->seg.sflags = mmap_flag;
  3498       m->magic = mparams.magic;
  3499       init_bins(m);
  3500       if (is_global(m)) 
  3501         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3502       else {
  3503         /* Offset top by embedded malloc_state */
  3504         mchunkptr mn = next_chunk(mem2chunk(m));
  3505         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
  3506       }
  3507     }
  3508 
  3509     else {
  3510       /* Try to merge with an existing segment */
  3511       msegmentptr sp = &m->seg;
  3512       while (sp != 0 && tbase != sp->base + sp->size)
  3513         sp = sp->next;
  3514       if (sp != 0 &&
  3515           !is_extern_segment(sp) &&
  3516           (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
  3517           segment_holds(sp, m->top)) { /* append */
  3518         sp->size += tsize;
  3519         init_top(m, m->top, m->topsize + tsize);
  3520       }
  3521       else {
  3522         if (tbase < m->least_addr)
  3523           m->least_addr = tbase;
  3524         sp = &m->seg;
  3525         while (sp != 0 && sp->base != tbase + tsize)
  3526           sp = sp->next;
  3527         if (sp != 0 &&
  3528             !is_extern_segment(sp) &&
  3529             (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
  3530           char* oldbase = sp->base;
  3531           sp->base = tbase;
  3532           sp->size += tsize;
  3533           return prepend_alloc(m, tbase, oldbase, nb);
  3534         }
  3535         else
  3536           add_segment(m, tbase, tsize, mmap_flag);
  3537       }
  3538     }
  3539 
  3540     if (nb < m->topsize) { /* Allocate from new or extended top space */
  3541       size_t rsize = m->topsize -= nb;
  3542       mchunkptr p = m->top;
  3543       mchunkptr r = m->top = chunk_plus_offset(p, nb);
  3544       r->head = rsize | PINUSE_BIT;
  3545       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3546       check_top_chunk(m, m->top);
  3547       check_malloced_chunk(m, chunk2mem(p), nb);
  3548       return chunk2mem(p);
  3549     }
  3550   }
  3551 
  3552   MALLOC_FAILURE_ACTION;
  3553   return 0;
  3554 }
  3555 
  3556 /* -----------------------  system deallocation -------------------------- */
  3557 
  3558 /* Unmap and unlink any mmapped segments that don't contain used chunks */
  3559 static size_t release_unused_segments(mstate m) {
  3560   size_t released = 0;
  3561   msegmentptr pred = &m->seg;
  3562   msegmentptr sp = pred->next;
  3563   while (sp != 0) {
  3564     char* base = sp->base;
  3565     size_t size = sp->size;
  3566     msegmentptr next = sp->next;
  3567     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
  3568       mchunkptr p = align_as_chunk(base);
  3569       size_t psize = chunksize(p);
  3570       /* Can unmap if first chunk holds entire segment and not pinned */
  3571       if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
  3572         tchunkptr tp = (tchunkptr)p;
  3573         assert(segment_holds(sp, (char*)sp));
  3574         if (p == m->dv) {
  3575           m->dv = 0;
  3576           m->dvsize = 0;
  3577         }
  3578         else {
  3579           unlink_large_chunk(m, tp);
  3580         }
  3581         if (CALL_MUNMAP(base, size) == 0) {
  3582           released += size;
  3583           m->footprint -= size;
  3584           /* unlink obsoleted record */
  3585           sp = pred;
  3586           sp->next = next;
  3587         }
  3588         else { /* back out if cannot unmap */
  3589           insert_large_chunk(m, tp, psize);
  3590         }
  3591       }
  3592     }
  3593     pred = sp;
  3594     sp = next;
  3595   }
  3596   return released;
  3597 }
  3598 
  3599 static int sys_trim(mstate m, size_t pad) {
  3600   size_t released = 0;
  3601   if (pad < MAX_REQUEST && is_initialized(m)) {
  3602     pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
  3603 
  3604     if (m->topsize > pad) {
  3605       /* Shrink top space in granularity-size units, keeping at least one */
  3606       size_t unit = mparams.granularity;
  3607       size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
  3608                       SIZE_T_ONE) * unit;
  3609       msegmentptr sp = segment_holding(m, (char*)m->top);
  3610 
  3611       if (!is_extern_segment(sp)) {
  3612         if (is_mmapped_segment(sp)) {
  3613           if (HAVE_MMAP &&
  3614               sp->size >= extra &&
  3615               !has_segment_link(m, sp)) { /* can't shrink if pinned */
  3616             size_t newsize = sp->size - extra;
  3617             /* Prefer mremap, fall back to munmap */
  3618             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
  3619                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
  3620               released = extra;
  3621             }
  3622           }
  3623         }
  3624         else if (HAVE_MORECORE) {
  3625           if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
  3626             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
  3627           ACQUIRE_MORECORE_LOCK();
  3628           {
  3629             /* Make sure end of memory is where we last set it. */
  3630             char* old_br = (char*)(CALL_MORECORE(0));
  3631             if (old_br == sp->base + sp->size) {
  3632               char* rel_br = (char*)(CALL_MORECORE(-extra));
  3633               char* new_br = (char*)(CALL_MORECORE(0));
  3634               if (rel_br != CMFAIL && new_br < old_br)
  3635                 released = old_br - new_br;
  3636             }
  3637           }
  3638           RELEASE_MORECORE_LOCK();
  3639         }
  3640       }
  3641 
  3642       if (released != 0) {
  3643         sp->size -= released;
  3644         m->footprint -= released;
  3645         init_top(m, m->top, m->topsize - released);
  3646         check_top_chunk(m, m->top);
  3647       }
  3648     }
  3649 
  3650     /* Unmap any unused mmapped segments */
  3651     if (HAVE_MMAP) 
  3652       released += release_unused_segments(m);
  3653 
  3654     /* On failure, disable autotrim to avoid repeated failed future calls */
  3655     if (released == 0)
  3656       m->trim_check = MAX_SIZE_T;
  3657   }
  3658 
  3659   return (released != 0)? 1 : 0;
  3660 }
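/* sys_trim is the backend for the public malloc_trim-style call and
   for the automatic trim check performed when free() leaves a large
   top chunk: it returns whole granularity units of top space to the
   system while keeping at least `pad` bytes (plus roughly one
   granularity unit and TOP_FOOT_SIZE of overhead) available in top. */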
  3661 
  3662 /* ---------------------------- malloc support --------------------------- */
  3663 
  3664 /* allocate a large request from the best fitting chunk in a treebin */
  3665 static void* tmalloc_large(mstate m, size_t nb) {
  3666   tchunkptr v = 0;
  3667   size_t rsize = -nb; /* Unsigned negation */
  3668   tchunkptr t;
  3669   bindex_t idx;
  3670   compute_tree_index(nb, idx);
  3671 
  3672   if ((t = *treebin_at(m, idx)) != 0) {
  3673     /* Traverse tree for this bin looking for node with size == nb */
  3674     size_t sizebits = nb << leftshift_for_tree_index(idx);
  3675     tchunkptr rst = 0;  /* The deepest untaken right subtree */
  3676     for (;;) {
  3677       tchunkptr rt;
  3678       size_t trem = chunksize(t) - nb;
  3679       if (trem < rsize) {
  3680         v = t;
  3681         if ((rsize = trem) == 0)
  3682           break;
  3683       }
  3684       rt = t->child[1];
  3685       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  3686       if (rt != 0 && rt != t)
  3687         rst = rt;
  3688       if (t == 0) {
  3689         t = rst; /* set t to least subtree holding sizes > nb */
  3690         break;
  3691       }
  3692       sizebits <<= 1;
  3693     }
  3694   }
  3695 
  3696   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
  3697     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
  3698     if (leftbits != 0) {
  3699       bindex_t i;
  3700       binmap_t leastbit = least_bit(leftbits);
  3701       compute_bit2idx(leastbit, i);
  3702       t = *treebin_at(m, i);
  3703     }
  3704   }
  3705 
  3706   while (t != 0) { /* find smallest of tree or subtree */
  3707     size_t trem = chunksize(t) - nb;
  3708     if (trem < rsize) {
  3709       rsize = trem;
  3710       v = t;
  3711     }
  3712     t = leftmost_child(t);
  3713   }
  3714 
  3715   /*  If dv is a better fit, return 0 so malloc will use it */
  3716   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
  3717     if (RTCHECK(ok_address(m, v))) { /* split */
  3718       mchunkptr r = chunk_plus_offset(v, nb);
  3719       assert(chunksize(v) == rsize + nb);
  3720       if (RTCHECK(ok_next(v, r))) {
  3721         unlink_large_chunk(m, v);
  3722         if (rsize < MIN_CHUNK_SIZE)
  3723           set_inuse_and_pinuse(m, v, (rsize + nb));
  3724         else {
  3725           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3726           set_size_and_pinuse_of_free_chunk(r, rsize);
  3727           insert_chunk(m, r, rsize);
  3728         }
  3729         return chunk2mem(v);
  3730       }
  3731     }
  3732     CORRUPTION_ERROR_ACTION(m);
  3733   }
  3734   return 0;
  3735 }
  3736 
  3737 /* allocate a small request from the best fitting chunk in a treebin */
  3738 static void* tmalloc_small(mstate m, size_t nb) {
  3739   tchunkptr t, v;
  3740   size_t rsize;
  3741   bindex_t i;
  3742   binmap_t leastbit = least_bit(m->treemap);
  3743   compute_bit2idx(leastbit, i);
  3744 
  3745   v = t = *treebin_at(m, i);
  3746   rsize = chunksize(t) - nb;
  3747 
  3748   while ((t = leftmost_child(t)) != 0) {
  3749     size_t trem = chunksize(t) - nb;
  3750     if (trem < rsize) {
  3751       rsize = trem;
  3752       v = t;
  3753     }
  3754   }
  3755 
  3756   if (RTCHECK(ok_address(m, v))) {
  3757     mchunkptr r = chunk_plus_offset(v, nb);
  3758     assert(chunksize(v) == rsize + nb);
  3759     if (RTCHECK(ok_next(v, r))) {
  3760       unlink_large_chunk(m, v);
  3761       if (rsize < MIN_CHUNK_SIZE)
  3762         set_inuse_and_pinuse(m, v, (rsize + nb));
  3763       else {
  3764         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3765         set_size_and_pinuse_of_free_chunk(r, rsize);
  3766         replace_dv(m, r, rsize);
  3767       }
  3768       return chunk2mem(v);
  3769     }
  3770   }
  3771 
  3772   CORRUPTION_ERROR_ACTION(m);
  3773   return 0;
  3774 }
  3775 
  3776 /* --------------------------- realloc support --------------------------- */
  3777 
  3778 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
  3779   if (bytes >= MAX_REQUEST) {
  3780     MALLOC_FAILURE_ACTION;
  3781     return 0;
  3782   }
  3783   if (!PREACTION(m)) {
  3784     mchunkptr oldp = mem2chunk(oldmem);
  3785     size_t oldsize = chunksize(oldp);
  3786     mchunkptr next = chunk_plus_offset(oldp, oldsize);
  3787     mchunkptr newp = 0;
  3788     void* extra = 0;
  3789 
  3790     /* Try to either shrink or extend into top. Else malloc-copy-free */
  3791 
  3792     if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
  3793                 ok_next(oldp, next) && ok_pinuse(next))) {
  3794       size_t nb = request2size(bytes);
  3795       if (is_mmapped(oldp))
  3796         newp = mmap_resize(m, oldp, nb);
  3797       else if (oldsize >= nb) { /* already big enough */
  3798         size_t rsize = oldsize - nb;
  3799         newp = oldp;
  3800         if (rsize >= MIN_CHUNK_SIZE) {
  3801           mchunkptr remainder = chunk_plus_offset(newp, nb);
  3802           set_inuse(m, newp, nb);
  3803           set_inuse(m, remainder, rsize);
  3804           extra = chunk2mem(remainder);
  3805         }
  3806       }
  3807       else if (next == m->top && oldsize + m->topsize > nb) {
  3808         /* Expand into top */
  3809         size_t newsize = oldsize + m->topsize;
  3810         size_t newtopsize = newsize - nb;
  3811         mchunkptr newtop = chunk_plus_offset(oldp, nb);
  3812         set_inuse(m, oldp, nb);
  3813         newtop->head = newtopsize |PINUSE_BIT;
  3814         m->top = newtop;
  3815         m->topsize = newtopsize;
  3816         newp = oldp;
  3817       }
  3818     }
  3819     else {
  3820       USAGE_ERROR_ACTION(m, oldmem);
  3821       POSTACTION(m);
  3822       return 0;
  3823     }
  3824 
  3825     POSTACTION(m);
  3826 
  3827     if (newp != 0) {
  3828       if (extra != 0) {
  3829         internal_free(m, extra);
  3830       }
  3831       check_inuse_chunk(m, newp);
  3832       return chunk2mem(newp);
  3833     }
  3834     else {
  3835       void* newmem = internal_malloc(m, bytes);
  3836       if (newmem != 0) {
  3837         size_t oc = oldsize - overhead_for(oldp);
  3838         memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
  3839         internal_free(m, oldmem);
  3840       }
  3841       return newmem;
  3842     }
  3843   }
  3844   return 0;
  3845 }
  3846 
  3847 /* --------------------------- memalign support -------------------------- */
  3848 
  3849 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  3850   if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
  3851     return internal_malloc(m, bytes);
  3852   if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
  3853     alignment = MIN_CHUNK_SIZE;
  3854   if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
  3855     size_t a = MALLOC_ALIGNMENT << 1;
  3856     while (a < alignment) a <<= 1;
  3857     alignment = a;
  3858   }
  3859   
  3860   if (bytes >= MAX_REQUEST - alignment) {
  3861     if (m != 0)  { /* Test isn't needed but avoids compiler warning */
  3862       MALLOC_FAILURE_ACTION;
  3863     }
  3864   }
  3865   else {
  3866     size_t nb = request2size(bytes);
  3867     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
  3868     char* mem = (char*)internal_malloc(m, req);
  3869     if (mem != 0) {
  3870       void* leader = 0;
  3871       void* trailer = 0;
  3872       mchunkptr p = mem2chunk(mem);
  3873 
  3874       if (PREACTION(m)) return 0;
  3875       if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
  3876         /*
  3877           Find an aligned spot inside chunk.  Since we need to give
  3878           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
  3879           the first calculation places us at a spot with less than
  3880           MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
  3881           We've allocated enough total room so that this is always
  3882           possible.
  3883         */
  3884         char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
  3885                                                        alignment -
  3886                                                        SIZE_T_ONE)) &
  3887                                              -alignment));
  3888         char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
  3889           br : br+alignment;
  3890         mchunkptr newp = (mchunkptr)pos;
  3891         size_t leadsize = pos - (char*)(p);
  3892         size_t newsize = chunksize(p) - leadsize;
  3893 
  3894         if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
  3895           newp->prev_foot = p->prev_foot + leadsize;
  3896           newp->head = (newsize|CINUSE_BIT);
  3897         }
  3898         else { /* Otherwise, give back leader, use the rest */
  3899           set_inuse(m, newp, newsize);
  3900           set_inuse(m, p, leadsize);
  3901           leader = chunk2mem(p);
  3902         }
  3903         p = newp;
  3904       }
  3905 
  3906       /* Give back spare room at the end */
  3907       if (!is_mmapped(p)) {
  3908         size_t size = chunksize(p);
  3909         if (size > nb + MIN_CHUNK_SIZE) {
  3910           size_t remainder_size = size - nb;
  3911           mchunkptr remainder = chunk_plus_offset(p, nb);
  3912           set_inuse(m, p, nb);
  3913           set_inuse(m, remainder, remainder_size);
  3914           trailer = chunk2mem(remainder);
  3915         }
  3916       }
  3917 
  3918       assert (chunksize(p) >= nb);
  3919       assert((((size_t)(chunk2mem(p))) % alignment) == 0);
  3920       check_inuse_chunk(m, p);
  3921       POSTACTION(m);
  3922       if (leader != 0) {
  3923         internal_free(m, leader);
  3924       }
  3925       if (trailer != 0) {
  3926         internal_free(m, trailer);
  3927       }
  3928       return chunk2mem(p);
  3929     }
  3930   }
  3931   return 0;
  3932 }
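
/*
  Hedged worked example (not part of the allocator): the two pieces of
  pointer arithmetic in internal_memalign are easiest to see with
  concrete numbers, assuming the default MALLOC_ALIGNMENT of 8.

  Rounding a non-power-of-two alignment up: for alignment == 24, `a`
  starts at 16 and doubles to 32, which becomes the effective
  alignment, since (32 & 31) == 0.

  Finding the aligned payload inside the oversized chunk: with
  alignment == 32 and mem == 0x1004 (misaligned),

      (mem + alignment - 1) & -alignment
        == (0x1004 + 31) & ~31
        == 0x1020

  i.e. the first 32-byte boundary at or above mem.  If that spot would
  leave a leader smaller than MIN_CHUNK_SIZE, the code steps forward by
  one more alignment unit; the over-request of
  nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD always leaves room
  for that.
*/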
  3933 
  3934 /* ------------------------ comalloc/coalloc support --------------------- */
  3935 
  3936 static void** ialloc(mstate m,
  3937                      size_t n_elements,
  3938                      size_t* sizes,
  3939                      int opts,
  3940                      void* chunks[]) {
  3941   /*
  3942     This provides common support for independent_X routines, handling
  3943     all of the combinations that can result.
  3944 
  3945     The opts arg has:
  3946     bit 0 set if all elements are same size (using sizes[0])
  3947     bit 1 set if elements should be zeroed
  3948   */
  3949 
  3950   size_t    element_size;   /* chunksize of each element, if all same */
  3951   size_t    contents_size;  /* total size of elements */
  3952   size_t    array_size;     /* request size of pointer array */
  3953   void*     mem;            /* malloced aggregate space */
  3954   mchunkptr p;              /* corresponding chunk */
  3955   size_t    remainder_size; /* remaining bytes while splitting */
  3956   void**    marray;         /* either "chunks" or malloced ptr array */
  3957   mchunkptr array_chunk;    /* chunk for malloced ptr array */
  3958   flag_t    was_enabled;    /* to disable mmap */
  3959   size_t    size;
  3960   size_t    i;
  3961 
  3962   /* compute array length, if needed */
  3963   if (chunks != 0) {
  3964     if (n_elements == 0)
  3965       return chunks; /* nothing to do */
  3966     marray = chunks;
  3967     array_size = 0;
  3968   }
  3969   else {
  3970     /* if empty req, must still return chunk representing empty array */
  3971     if (n_elements == 0)
  3972       return (void**)internal_malloc(m, 0);
  3973     marray = 0;
  3974     array_size = request2size(n_elements * (sizeof(void*)));
  3975   }
  3976 
  3977   /* compute total element size */
  3978   if (opts & 0x1) { /* all-same-size */
  3979     element_size = request2size(*sizes);
  3980     contents_size = n_elements * element_size;
  3981   }
  3982   else { /* add up all the sizes */
  3983     element_size = 0;
  3984     contents_size = 0;
  3985     for (i = 0; i != n_elements; ++i)
  3986       contents_size += request2size(sizes[i]);
  3987   }
  3988 
  3989   size = contents_size + array_size;
  3990 
  3991   /*
  3992      Allocate the aggregate chunk.  First disable direct-mmapping so
  3993      malloc won't use it, since we would not be able to later
  3994      free/realloc space internal to a segregated mmap region.
  3995   */
  3996   was_enabled = use_mmap(m);
  3997   disable_mmap(m);
  3998   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  3999   if (was_enabled)
  4000     enable_mmap(m);
  4001   if (mem == 0)
  4002     return 0;
  4003 
  4004   if (PREACTION(m)) return 0;
  4005   p = mem2chunk(mem);
  4006   remainder_size = chunksize(p);
  4007 
  4008   assert(!is_mmapped(p));
  4009 
  4010   if (opts & 0x2) {       /* optionally clear the elements */
  4011     memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  4012   }
  4013 
  4014   /* If not provided, allocate the pointer array as final part of chunk */
  4015   if (marray == 0) {
  4016     size_t  array_chunk_size;
  4017     array_chunk = chunk_plus_offset(p, contents_size);
  4018     array_chunk_size = remainder_size - contents_size;
  4019     marray = (void**) (chunk2mem(array_chunk));
  4020     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
  4021     remainder_size = contents_size;
  4022   }
  4023 
  4024   /* split out elements */
  4025   for (i = 0; ; ++i) {
  4026     marray[i] = chunk2mem(p);
  4027     if (i != n_elements-1) {
  4028       if (element_size != 0)
  4029         size = element_size;
  4030       else
  4031         size = request2size(sizes[i]);
  4032       remainder_size -= size;
  4033       set_size_and_pinuse_of_inuse_chunk(m, p, size);
  4034       p = chunk_plus_offset(p, size);
  4035     }
  4036     else { /* the final element absorbs any overallocation slop */
  4037       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
  4038       break;
  4039     }
  4040   }
  4041 
  4042 #if DEBUG
  4043   if (marray != chunks) {
  4044     /* final element must have exactly exhausted chunk */
  4045     if (element_size != 0) {
  4046       assert(remainder_size == element_size);
  4047     }
  4048     else {
  4049       assert(remainder_size == request2size(sizes[i]));
  4050     }
  4051     check_inuse_chunk(m, mem2chunk(marray));
  4052   }
  4053   for (i = 0; i != n_elements; ++i)
  4054     check_inuse_chunk(m, mem2chunk(marray[i]));
  4055 
  4056 #endif /* DEBUG */
  4057 
  4058   POSTACTION(m);
  4059   return marray;
  4060 }
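
/*
  Illustrative note (not from the original dlmalloc sources): ialloc
  carves a single aggregate chunk into n_elements in-use chunks,
  optionally followed by the pointer array when the caller passed a
  null "chunks" argument.  With opts == 3 (all same size, zeroed) and
  n_elements == 3, the aggregate region looks roughly like

      [ elem 0 ][ elem 1 ][ elem 2 ][ void* marray[3] ]

  where each piece is an ordinary in-use chunk, the last element
  absorbs any overallocation slop, and every piece can later be freed
  independently.  dlindependent_calloc below calls ialloc with
  opts == 3; dlindependent_comalloc calls it with opts == 0 and
  per-element sizes.
*/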
  4061 
  4062 
  4063 /* -------------------------- public routines ---------------------------- */
  4064 
  4065 #if !ONLY_MSPACES
  4066 
  4067 void* dlmalloc(size_t bytes) {
  4068   /*
  4069      Basic algorithm:
  4070      If a small request (< 256 bytes minus per-chunk overhead):
  4071        1. If one exists, use a remainderless chunk in associated smallbin.
  4072           (Remainderless means that there are too few excess bytes to
  4073           represent as a chunk.)
  4074        2. If it is big enough, use the dv chunk, which is normally the
  4075           chunk adjacent to the one used for the most recent small request.
  4076        3. If one exists, split the smallest available chunk in a bin,
  4077           saving remainder in dv.
  4078        4. If it is big enough, use the top chunk.
  4079        5. If available, get memory from system and use it
  4080      Otherwise, for a large request:
  4081        1. Find the smallest available binned chunk that fits, and use it
  4082           if it is better fitting than dv chunk, splitting if necessary.
  4083        2. If better fitting than any binned chunk, use the dv chunk.
  4084        3. If it is big enough, use the top chunk.
  4085        4. If request size >= mmap threshold, try to directly mmap this chunk.
  4086        5. If available, get memory from system and use it
  4087 
  4088      The ugly goto's here ensure that postaction occurs along all paths.
  4089   */
  4090 
  4091   if (!PREACTION(gm)) {
  4092     void* mem;
  4093     size_t nb;
  4094     if (bytes <= MAX_SMALL_REQUEST) {
  4095       bindex_t idx;
  4096       binmap_t smallbits;
  4097       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4098       idx = small_index(nb);
  4099       smallbits = gm->smallmap >> idx;
  4100 
  4101       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4102         mchunkptr b, p;
  4103         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4104         b = smallbin_at(gm, idx);
  4105         p = b->fd;
  4106         assert(chunksize(p) == small_index2size(idx));
  4107         unlink_first_small_chunk(gm, b, p, idx);
  4108         set_inuse_and_pinuse(gm, p, small_index2size(idx));
  4109         mem = chunk2mem(p);
  4110         check_malloced_chunk(gm, mem, nb);
  4111         goto postaction;
  4112       }
  4113 
  4114       else if (nb > gm->dvsize) {
  4115         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4116           mchunkptr b, p, r;
  4117           size_t rsize;
  4118           bindex_t i;
  4119           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4120           binmap_t leastbit = least_bit(leftbits);
  4121           compute_bit2idx(leastbit, i);
  4122           b = smallbin_at(gm, i);
  4123           p = b->fd;
  4124           assert(chunksize(p) == small_index2size(i));
  4125           unlink_first_small_chunk(gm, b, p, i);
  4126           rsize = small_index2size(i) - nb;
  4127           /* Fit here cannot be remainderless with 4-byte sizes */
  4128           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4129             set_inuse_and_pinuse(gm, p, small_index2size(i));
  4130           else {
  4131             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4132             r = chunk_plus_offset(p, nb);
  4133             set_size_and_pinuse_of_free_chunk(r, rsize);
  4134             replace_dv(gm, r, rsize);
  4135           }
  4136           mem = chunk2mem(p);
  4137           check_malloced_chunk(gm, mem, nb);
  4138           goto postaction;
  4139         }
  4140 
  4141         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
  4142           check_malloced_chunk(gm, mem, nb);
  4143           goto postaction;
  4144         }
  4145       }
  4146     }
  4147     else if (bytes >= MAX_REQUEST)
  4148       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  4149     else {
  4150       nb = pad_request(bytes);
  4151       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  4152         check_malloced_chunk(gm, mem, nb);
  4153         goto postaction;
  4154       }
  4155     }
  4156 
  4157     if (nb <= gm->dvsize) {
  4158       size_t rsize = gm->dvsize - nb;
  4159       mchunkptr p = gm->dv;
  4160       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  4161         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
  4162         gm->dvsize = rsize;
  4163         set_size_and_pinuse_of_free_chunk(r, rsize);
  4164         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4165       }
  4166       else { /* exhaust dv */
  4167         size_t dvs = gm->dvsize;
  4168         gm->dvsize = 0;
  4169         gm->dv = 0;
  4170         set_inuse_and_pinuse(gm, p, dvs);
  4171       }
  4172       mem = chunk2mem(p);
  4173       check_malloced_chunk(gm, mem, nb);
  4174       goto postaction;
  4175     }
  4176 
  4177     else if (nb < gm->topsize) { /* Split top */
  4178       size_t rsize = gm->topsize -= nb;
  4179       mchunkptr p = gm->top;
  4180       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  4181       r->head = rsize | PINUSE_BIT;
  4182       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4183       mem = chunk2mem(p);
  4184       check_top_chunk(gm, gm->top);
  4185       check_malloced_chunk(gm, mem, nb);
  4186       goto postaction;
  4187     }
  4188 
  4189     mem = sys_alloc(gm, nb);
  4190 
  4191   postaction:
  4192     POSTACTION(gm);
  4193     return mem;
  4194   }
  4195 
  4196   return 0;
  4197 }
  4198 
  4199 void dlfree(void* mem) {
  4200   /*
  4201      Consolidate freed chunks with preceding or succeeding bordering
  4202      free chunks, if they exist, and then place in a bin.  Intermixed
  4203      with special cases for top, dv, mmapped chunks, and usage errors.
  4204   */
  4205 
  4206   if (mem != 0) {
  4207     mchunkptr p  = mem2chunk(mem);
  4208 #if FOOTERS
  4209     mstate fm = get_mstate_for(p);
  4210     if (!ok_magic(fm)) {
  4211       USAGE_ERROR_ACTION(fm, p);
  4212       return;
  4213     }
  4214 #else /* FOOTERS */
  4215 #define fm gm
  4216 #endif /* FOOTERS */
  4217     if (!PREACTION(fm)) {
  4218       check_inuse_chunk(fm, p);
  4219       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4220         size_t psize = chunksize(p);
  4221         mchunkptr next = chunk_plus_offset(p, psize);
  4222         if (!pinuse(p)) {
  4223           size_t prevsize = p->prev_foot;
  4224           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4225             prevsize &= ~IS_MMAPPED_BIT;
  4226             psize += prevsize + MMAP_FOOT_PAD;
  4227             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4228               fm->footprint -= psize;
  4229             goto postaction;
  4230           }
  4231           else {
  4232             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4233             psize += prevsize;
  4234             p = prev;
  4235             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4236               if (p != fm->dv) {
  4237                 unlink_chunk(fm, p, prevsize);
  4238               }
  4239               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4240                 fm->dvsize = psize;
  4241                 set_free_with_pinuse(p, psize, next);
  4242                 goto postaction;
  4243               }
  4244             }
  4245             else
  4246               goto erroraction;
  4247           }
  4248         }
  4249 
  4250         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4251           if (!cinuse(next)) {  /* consolidate forward */
  4252             if (next == fm->top) {
  4253               size_t tsize = fm->topsize += psize;
  4254               fm->top = p;
  4255               p->head = tsize | PINUSE_BIT;
  4256               if (p == fm->dv) {
  4257                 fm->dv = 0;
  4258                 fm->dvsize = 0;
  4259               }
  4260               if (should_trim(fm, tsize))
  4261                 sys_trim(fm, 0);
  4262               goto postaction;
  4263             }
  4264             else if (next == fm->dv) {
  4265               size_t dsize = fm->dvsize += psize;
  4266               fm->dv = p;
  4267               set_size_and_pinuse_of_free_chunk(p, dsize);
  4268               goto postaction;
  4269             }
  4270             else {
  4271               size_t nsize = chunksize(next);
  4272               psize += nsize;
  4273               unlink_chunk(fm, next, nsize);
  4274               set_size_and_pinuse_of_free_chunk(p, psize);
  4275               if (p == fm->dv) {
  4276                 fm->dvsize = psize;
  4277                 goto postaction;
  4278               }
  4279             }
  4280           }
  4281           else
  4282             set_free_with_pinuse(p, psize, next);
  4283           insert_chunk(fm, p, psize);
  4284           check_free_chunk(fm, p);
  4285           goto postaction;
  4286         }
  4287       }
  4288     erroraction:
  4289       USAGE_ERROR_ACTION(fm, p);
  4290     postaction:
  4291       POSTACTION(fm);
  4292     }
  4293   }
  4294 #if !FOOTERS
  4295 #undef fm
  4296 #endif /* FOOTERS */
  4297 }
  4298 
  4299 void* dlcalloc(size_t n_elements, size_t elem_size) {
  4300   void* mem;
  4301   size_t req = 0;
  4302   if (n_elements != 0) {
  4303     req = n_elements * elem_size;
  4304     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4305         (req / n_elements != elem_size))
  4306       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4307   }
  4308   mem = dlmalloc(req);
  4309   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4310     memset(mem, 0, req);
  4311   return mem;
  4312 }
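
/*
  Hedged worked example (illustrative numbers): the guard above only
  pays for a division when overflow is possible.  If both n_elements
  and elem_size fit in 16 bits, their bitwise OR has no bits above
  0xffff, so the product is below 2^32 and cannot wrap a 32-bit (or
  wider) size_t, and the division is skipped.  Otherwise, on a 32-bit
  system with n_elements == 0x10000 and elem_size == 0x10001, req
  wraps to 0x10000; since req / n_elements == 1 != elem_size, req is
  forced to MAX_SIZE_T and dlmalloc fails cleanly instead of handing
  back an undersized block.
*/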
  4313 
  4314 void* dlrealloc(void* oldmem, size_t bytes) {
  4315   if (oldmem == 0)
  4316     return dlmalloc(bytes);
  4317 #ifdef REALLOC_ZERO_BYTES_FREES
  4318   if (bytes == 0) {
  4319     dlfree(oldmem);
  4320     return 0;
  4321   }
  4322 #endif /* REALLOC_ZERO_BYTES_FREES */
  4323   else {
  4324 #if ! FOOTERS
  4325     mstate m = gm;
  4326 #else /* FOOTERS */
  4327     mstate m = get_mstate_for(mem2chunk(oldmem));
  4328     if (!ok_magic(m)) {
  4329       USAGE_ERROR_ACTION(m, oldmem);
  4330       return 0;
  4331     }
  4332 #endif /* FOOTERS */
  4333     return internal_realloc(m, oldmem, bytes);
  4334   }
  4335 }
  4336 
  4337 void* dlmemalign(size_t alignment, size_t bytes) {
  4338   return internal_memalign(gm, alignment, bytes);
  4339 }
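
/*
  Minimal usage sketch (illustrative only; assumes the dl-prefixed
  entry points are visible to the caller):

      void* p = dlmemalign(64, 1000);    // 1000 usable bytes, 64-byte aligned
      if (p != 0) {
        assert(((size_t)p % 64) == 0);
        dlfree(p);
      }

  Alignments no larger than MALLOC_ALIGNMENT fall through to plain
  dlmalloc, and non-power-of-two alignments are rounded up, per
  internal_memalign above.
*/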
  4340 
  4341 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
  4342                                  void* chunks[]) {
  4343   size_t sz = elem_size; /* serves as 1-element array */
  4344   return ialloc(gm, n_elements, &sz, 3, chunks);
  4345 }
  4346 
  4347 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
  4348                                    void* chunks[]) {
  4349   return ialloc(gm, n_elements, sizes, 0, chunks);
  4350 }
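
/*
  Hedged usage sketch for the independent_X entry points (illustrative
  only; the element sizes are arbitrary).  One underlying allocation
  yields three independently freeable pieces:

      size_t sizes[3] = { 24, 100, 400 };
      void*  pieces[3];
      if (dlindependent_comalloc(3, sizes, pieces) != 0) {
        // ... use pieces[0], pieces[1], pieces[2] ...
        dlfree(pieces[2]);
        dlfree(pieces[1]);
        dlfree(pieces[0]);
      }

  Passing a null "chunks" argument instead makes ialloc allocate the
  pointer array itself; that returned array is then also released with
  dlfree once the caller is done with it.
*/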
  4351 
  4352 void* dlvalloc(size_t bytes) {
  4353   size_t pagesz;
  4354   init_mparams();
  4355   pagesz = mparams.page_size;
  4356   return dlmemalign(pagesz, bytes);
  4357 }
  4358 
  4359 void* dlpvalloc(size_t bytes) {
  4360   size_t pagesz;
  4361   init_mparams();
  4362   pagesz = mparams.page_size;
  4363   return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
  4364 }
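
/*
  Illustrative arithmetic (assuming a 4096-byte page): dlvalloc(5000)
  returns 5000 usable bytes starting on a page boundary, while
  dlpvalloc(5000) first rounds the request itself up to whole pages:

      (5000 + 4095) & ~4095  ==  8192

  so the caller receives two full pages of usable space.
*/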
  4365 
  4366 int dlmalloc_trim(size_t pad) {
  4367   int result = 0;
  4368   if (!PREACTION(gm)) {
  4369     result = sys_trim(gm, pad);
  4370     POSTACTION(gm);
  4371   }
  4372   return result;
  4373 }
  4374 
  4375 size_t dlmalloc_footprint(void) {
  4376   return gm->footprint;
  4377 }
  4378 
  4379 size_t dlmalloc_max_footprint(void) {
  4380   return gm->max_footprint;
  4381 }
  4382 
  4383 #if !NO_MALLINFO
  4384 struct mallinfo dlmallinfo(void) {
  4385   return internal_mallinfo(gm);
  4386 }
  4387 #endif /* NO_MALLINFO */
  4388 
  4389 void dlmalloc_stats(void) {
  4390   internal_malloc_stats(gm);
  4391 }
  4392 
  4393 size_t dlmalloc_usable_size(void* mem) {
  4394   if (mem != 0) {
  4395     mchunkptr p = mem2chunk(mem);
  4396     if (cinuse(p))
  4397       return chunksize(p) - overhead_for(p);
  4398   }
  4399   return 0;
  4400 }
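
/*
  Hedged usage note (illustrative numbers): because requests are padded
  to chunk granularity, the reported size may exceed the original
  request; e.g. dlmalloc(100) might report 104 usable bytes on a
  system with 8-byte size_t, and all of those bytes may safely be
  used.
*/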
  4401 
  4402 int dlmallopt(int param_number, int value) {
  4403   return change_mparam(param_number, value);
  4404 }
  4405 
  4406 #endif /* !ONLY_MSPACES */
  4407 
  4408 /* ----------------------------- user mspaces ---------------------------- */
  4409 
  4410 #if MSPACES
  4411 
  4412 static mstate init_user_mstate(char* tbase, size_t tsize) {
  4413   size_t msize = pad_request(sizeof(struct malloc_state));
  4414   mchunkptr mn;
  4415   mchunkptr msp = align_as_chunk(tbase);
  4416   mstate m = (mstate)(chunk2mem(msp));
  4417   memset(m, 0, msize);
  4418   INITIAL_LOCK(&m->mutex);
  4419   msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
  4420   m->seg.base = m->least_addr = tbase;
  4421   m->seg.size = m->footprint = m->max_footprint = tsize;
  4422   m->magic = mparams.magic;
  4423   m->mflags = mparams.default_mflags;
  4424   disable_contiguous(m);
  4425   init_bins(m);
  4426   mn = next_chunk(mem2chunk(m));
  4427   init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  4428   check_top_chunk(m, m->top);
  4429   return m;
  4430 }
  4431 
  4432 mspace create_mspace(size_t capacity, int locked) {
  4433   mstate m = 0;
  4434   size_t msize = pad_request(sizeof(struct malloc_state));
  4435   init_mparams(); /* Ensure pagesize etc initialized */
  4436 
  4437   if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4438     size_t rs = ((capacity == 0)? mparams.granularity :
  4439                  (capacity + TOP_FOOT_SIZE + msize));
  4440     size_t tsize = granularity_align(rs);
  4441     char* tbase = (char*)(CALL_MMAP(tsize));
  4442     if (tbase != CMFAIL) {
  4443       m = init_user_mstate(tbase, tsize);
  4444       m->seg.sflags = IS_MMAPPED_BIT;
  4445       set_lock(m, locked);
  4446     }
  4447   }
  4448   return (mspace)m;
  4449 }
  4450 
  4451 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  4452   mstate m = 0;
  4453   size_t msize = pad_request(sizeof(struct malloc_state));
  4454   init_mparams(); /* Ensure pagesize etc initialized */
  4455 
  4456   if (capacity > msize + TOP_FOOT_SIZE &&
  4457       capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4458     m = init_user_mstate((char*)base, capacity);
  4459     m->seg.sflags = EXTERN_BIT;
  4460     set_lock(m, locked);
  4461   }
  4462   return (mspace)m;
  4463 }
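
/*
  Hedged usage sketch (illustrative only; the buffer size is
  arbitrary).  Carving a private heap out of memory the caller already
  owns, then allocating from it:

      static char arena[1 << 20];        // 1 MB backing store
      mspace msp = create_mspace_with_base(arena, sizeof(arena), 0);
      if (msp != 0) {
        void* p = mspace_malloc(msp, 256);
        // ... use p ...
        mspace_free(msp, p);
        destroy_mspace(msp);             // arena itself is not released
      }

  Note that destroy_mspace below only unmaps segments flagged
  IS_MMAPPED_BIT and not EXTERN_BIT, so a base supplied this way is
  left for the caller to reclaim.
*/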
  4464 
  4465 size_t destroy_mspace(mspace msp) {
  4466   size_t freed = 0;
  4467   mstate ms = (mstate)msp;
  4468   if (ok_magic(ms)) {
  4469     msegmentptr sp = &ms->seg;
  4470     while (sp != 0) {
  4471       char* base = sp->base;
  4472       size_t size = sp->size;
  4473       flag_t flag = sp->sflags;
  4474       sp = sp->next;
  4475       if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
  4476           CALL_MUNMAP(base, size) == 0)
  4477         freed += size;
  4478     }
  4479   }
  4480   else {
  4481     USAGE_ERROR_ACTION(ms,ms);
  4482   }
  4483   return freed;
  4484 }
  4485 
  4486 /*
  4487   mspace versions of routines are near-clones of the global
  4488   versions. This is not so nice but better than the alternatives.
  4489 */
  4490 
  4491 
  4492 void* mspace_malloc(mspace msp, size_t bytes) {
  4493   mstate ms = (mstate)msp;
  4494   if (!ok_magic(ms)) {
  4495     USAGE_ERROR_ACTION(ms,ms);
  4496     return 0;
  4497   }
  4498   if (!PREACTION(ms)) {
  4499     void* mem;
  4500     size_t nb;
  4501     if (bytes <= MAX_SMALL_REQUEST) {
  4502       bindex_t idx;
  4503       binmap_t smallbits;
  4504       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4505       idx = small_index(nb);
  4506       smallbits = ms->smallmap >> idx;
  4507 
  4508       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4509         mchunkptr b, p;
  4510         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4511         b = smallbin_at(ms, idx);
  4512         p = b->fd;
  4513         assert(chunksize(p) == small_index2size(idx));
  4514         unlink_first_small_chunk(ms, b, p, idx);
  4515         set_inuse_and_pinuse(ms, p, small_index2size(idx));
  4516         mem = chunk2mem(p);
  4517         check_malloced_chunk(ms, mem, nb);
  4518         goto postaction;
  4519       }
  4520 
  4521       else if (nb > ms->dvsize) {
  4522         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4523           mchunkptr b, p, r;
  4524           size_t rsize;
  4525           bindex_t i;
  4526           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4527           binmap_t leastbit = least_bit(leftbits);
  4528           compute_bit2idx(leastbit, i);
  4529           b = smallbin_at(ms, i);
  4530           p = b->fd;
  4531           assert(chunksize(p) == small_index2size(i));
  4532           unlink_first_small_chunk(ms, b, p, i);
  4533           rsize = small_index2size(i) - nb;
  4534           /* Fit here cannot be remainderless with 4-byte sizes */
  4535           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4536             set_inuse_and_pinuse(ms, p, small_index2size(i));
  4537           else {
  4538             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4539             r = chunk_plus_offset(p, nb);
  4540             set_size_and_pinuse_of_free_chunk(r, rsize);
  4541             replace_dv(ms, r, rsize);
  4542           }
  4543           mem = chunk2mem(p);
  4544           check_malloced_chunk(ms, mem, nb);
  4545           goto postaction;
  4546         }
  4547 
  4548         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
  4549           check_malloced_chunk(ms, mem, nb);
  4550           goto postaction;
  4551         }
  4552       }
  4553     }
  4554     else if (bytes >= MAX_REQUEST)
  4555       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  4556     else {
  4557       nb = pad_request(bytes);
  4558       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
  4559         check_malloced_chunk(ms, mem, nb);
  4560         goto postaction;
  4561       }
  4562     }
  4563 
  4564     if (nb <= ms->dvsize) {
  4565       size_t rsize = ms->dvsize - nb;
  4566       mchunkptr p = ms->dv;
  4567       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  4568         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
  4569         ms->dvsize = rsize;
  4570         set_size_and_pinuse_of_free_chunk(r, rsize);
  4571         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4572       }
  4573       else { /* exhaust dv */
  4574         size_t dvs = ms->dvsize;
  4575         ms->dvsize = 0;
  4576         ms->dv = 0;
  4577         set_inuse_and_pinuse(ms, p, dvs);
  4578       }
  4579       mem = chunk2mem(p);
  4580       check_malloced_chunk(ms, mem, nb);
  4581       goto postaction;
  4582     }
  4583 
  4584     else if (nb < ms->topsize) { /* Split top */
  4585       size_t rsize = ms->topsize -= nb;
  4586       mchunkptr p = ms->top;
  4587       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
  4588       r->head = rsize | PINUSE_BIT;
  4589       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4590       mem = chunk2mem(p);
  4591       check_top_chunk(ms, ms->top);
  4592       check_malloced_chunk(ms, mem, nb);
  4593       goto postaction;
  4594     }
  4595 
  4596     mem = sys_alloc(ms, nb);
  4597 
  4598   postaction:
  4599     POSTACTION(ms);
  4600     return mem;
  4601   }
  4602 
  4603   return 0;
  4604 }
  4605 
  4606 void mspace_free(mspace msp, void* mem) {
  4607   if (mem != 0) {
  4608     mchunkptr p  = mem2chunk(mem);
  4609 #if FOOTERS
  4610     mstate fm = get_mstate_for(p);
  4611 #else /* FOOTERS */
  4612     mstate fm = (mstate)msp;
  4613 #endif /* FOOTERS */
  4614     if (!ok_magic(fm)) {
  4615       USAGE_ERROR_ACTION(fm, p);
  4616       return;
  4617     }
  4618     if (!PREACTION(fm)) {
  4619       check_inuse_chunk(fm, p);
  4620       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4621         size_t psize = chunksize(p);
  4622         mchunkptr next = chunk_plus_offset(p, psize);
  4623         if (!pinuse(p)) {
  4624           size_t prevsize = p->prev_foot;
  4625           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4626             prevsize &= ~IS_MMAPPED_BIT;
  4627             psize += prevsize + MMAP_FOOT_PAD;
  4628             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4629               fm->footprint -= psize;
  4630             goto postaction;
  4631           }
  4632           else {
  4633             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4634             psize += prevsize;
  4635             p = prev;
  4636             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4637               if (p != fm->dv) {
  4638                 unlink_chunk(fm, p, prevsize);
  4639               }
  4640               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4641                 fm->dvsize = psize;
  4642                 set_free_with_pinuse(p, psize, next);
  4643                 goto postaction;
  4644               }
  4645             }
  4646             else
  4647               goto erroraction;
  4648           }
  4649         }
  4650 
  4651         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4652           if (!cinuse(next)) {  /* consolidate forward */
  4653             if (next == fm->top) {
  4654               size_t tsize = fm->topsize += psize;
  4655               fm->top = p;
  4656               p->head = tsize | PINUSE_BIT;
  4657               if (p == fm->dv) {
  4658                 fm->dv = 0;
  4659                 fm->dvsize = 0;
  4660               }
  4661               if (should_trim(fm, tsize))
  4662                 sys_trim(fm, 0);
  4663               goto postaction;
  4664             }
  4665             else if (next == fm->dv) {
  4666               size_t dsize = fm->dvsize += psize;
  4667               fm->dv = p;
  4668               set_size_and_pinuse_of_free_chunk(p, dsize);
  4669               goto postaction;
  4670             }
  4671             else {
  4672               size_t nsize = chunksize(next);
  4673               psize += nsize;
  4674               unlink_chunk(fm, next, nsize);
  4675               set_size_and_pinuse_of_free_chunk(p, psize);
  4676               if (p == fm->dv) {
  4677                 fm->dvsize = psize;
  4678                 goto postaction;
  4679               }
  4680             }
  4681           }
  4682           else
  4683             set_free_with_pinuse(p, psize, next);
  4684           insert_chunk(fm, p, psize);
  4685           check_free_chunk(fm, p);
  4686           goto postaction;
  4687         }
  4688       }
  4689     erroraction:
  4690       USAGE_ERROR_ACTION(fm, p);
  4691     postaction:
  4692       POSTACTION(fm);
  4693     }
  4694   }
  4695 }
  4696 
  4697 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  4698   void* mem;
  4699   size_t req = 0;
  4700   mstate ms = (mstate)msp;
  4701   if (!ok_magic(ms)) {
  4702     USAGE_ERROR_ACTION(ms,ms);
  4703     return 0;
  4704   }
  4705   if (n_elements != 0) {
  4706     req = n_elements * elem_size;
  4707     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4708         (req / n_elements != elem_size))
  4709       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4710   }
  4711   mem = internal_malloc(ms, req);
  4712   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4713     memset(mem, 0, req);
  4714   return mem;
  4715 }
  4716 
  4717 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  4718   if (oldmem == 0)
  4719     return mspace_malloc(msp, bytes);
  4720 #ifdef REALLOC_ZERO_BYTES_FREES
  4721   if (bytes == 0) {
  4722     mspace_free(msp, oldmem);
  4723     return 0;
  4724   }
  4725 #endif /* REALLOC_ZERO_BYTES_FREES */
  4726   else {
  4727 #if FOOTERS
  4728     mchunkptr p  = mem2chunk(oldmem);
  4729     mstate ms = get_mstate_for(p);
  4730 #else /* FOOTERS */
  4731     mstate ms = (mstate)msp;
  4732 #endif /* FOOTERS */
  4733     if (!ok_magic(ms)) {
  4734       USAGE_ERROR_ACTION(ms,ms);
  4735       return 0;
  4736     }
  4737     return internal_realloc(ms, oldmem, bytes);
  4738   }
  4739 }
  4740 
  4741 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  4742   mstate ms = (mstate)msp;
  4743   if (!ok_magic(ms)) {
  4744     USAGE_ERROR_ACTION(ms,ms);
  4745     return 0;
  4746   }
  4747   return internal_memalign(ms, alignment, bytes);
  4748 }
  4749 
  4750 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  4751                                  size_t elem_size, void* chunks[]) {
  4752   size_t sz = elem_size; /* serves as 1-element array */
  4753   mstate ms = (mstate)msp;
  4754   if (!ok_magic(ms)) {
  4755     USAGE_ERROR_ACTION(ms,ms);
  4756     return 0;
  4757   }
  4758   return ialloc(ms, n_elements, &sz, 3, chunks);
  4759 }
  4760 
  4761 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  4762                                    size_t sizes[], void* chunks[]) {
  4763   mstate ms = (mstate)msp;
  4764   if (!ok_magic(ms)) {
  4765     USAGE_ERROR_ACTION(ms,ms);
  4766     return 0;
  4767   }
  4768   return ialloc(ms, n_elements, sizes, 0, chunks);
  4769 }
  4770 
  4771 int mspace_trim(mspace msp, size_t pad) {
  4772   int result = 0;
  4773   mstate ms = (mstate)msp;
  4774   if (ok_magic(ms)) {
  4775     if (!PREACTION(ms)) {
  4776       result = sys_trim(ms, pad);
  4777       POSTACTION(ms);
  4778     }
  4779   }
  4780   else {
  4781     USAGE_ERROR_ACTION(ms,ms);
  4782   }
  4783   return result;
  4784 }
  4785 
  4786 void mspace_malloc_stats(mspace msp) {
  4787   mstate ms = (mstate)msp;
  4788   if (ok_magic(ms)) {
  4789     internal_malloc_stats(ms);
  4790   }
  4791   else {
  4792     USAGE_ERROR_ACTION(ms,ms);
  4793   }
  4794 }
  4795 
  4796 size_t mspace_footprint(mspace msp) {
  4797   size_t result = 0;
  4798   mstate ms = (mstate)msp;
  4799   if (ok_magic(ms)) {
  4800     result = ms->footprint;
  4801   }
  4802   else { USAGE_ERROR_ACTION(ms,ms); }
  4803   return result;
  4804 }
  4805 
  4806 
  4807 size_t mspace_max_footprint(mspace msp) {
  4808   size_t result = 0;
  4809   mstate ms = (mstate)msp;
  4810   if (ok_magic(ms)) {
  4811     result = ms->max_footprint;
  4812   }
  4813   else { USAGE_ERROR_ACTION(ms,ms); }
  4814   return result;
  4815 }
  4816 
  4817 
  4818 #if !NO_MALLINFO
  4819 struct mallinfo mspace_mallinfo(mspace msp) {
  4820   mstate ms = (mstate)msp;
  4821   if (!ok_magic(ms)) {
  4822     USAGE_ERROR_ACTION(ms,ms);
  4823   }
  4824   return internal_mallinfo(ms);
  4825 }
  4826 #endif /* NO_MALLINFO */
  4827 
  4828 int mspace_mallopt(int param_number, int value) {
  4829   return change_mparam(param_number, value);
  4830 }
  4831 
  4832 #endif /* MSPACES */
  4833 
  4834 /* -------------------- Alternative MORECORE functions ------------------- */
  4835 
  4836 /*
  4837   Guidelines for creating a custom version of MORECORE:
  4838 
  4839   * For best performance, MORECORE should allocate in multiples of pagesize.
  4840   * MORECORE may allocate more memory than requested. (Or even less,
  4841       but this will usually result in a malloc failure.)
  4842   * MORECORE must not allocate memory when given argument zero, but
  4843       instead return one past the end address of memory from previous
  4844       nonzero call.
  4845   * For best performance, consecutive calls to MORECORE with positive
  4846       arguments should return increasing addresses, indicating that
  4847       space has been contiguously extended.
  4848   * Even though consecutive calls to MORECORE need not return contiguous
  4849       addresses, it must be OK for malloc'ed chunks to span multiple
  4850       regions in those cases where they do happen to be contiguous.
  4851   * MORECORE need not handle negative arguments -- it may instead
  4852       just return MFAIL when given negative arguments.
  4853       Negative arguments are always multiples of pagesize. MORECORE
  4854       must not misinterpret negative args as large positive unsigned
  4855       args. You can suppress all such calls from even occurring by defining
  4856       MORECORE_CANNOT_TRIM.
  4857 
  4858   As an example alternative MORECORE, here is a custom allocator
  4859   kindly contributed for pre-OS X Mac OS.  It uses virtually but not
  4860   necessarily physically contiguous non-paged memory (locked in,
  4861   present and won't get swapped out).  You can use it by uncommenting
  4862   this section, adding some #includes, and setting up the appropriate
  4863   defines above:
  4864 
  4865       #define MORECORE osMoreCore
  4866 
  4867   There is also a shutdown routine that should somehow be called for
  4868   cleanup upon program exit.
  4869 
  4870   #define MAX_POOL_ENTRIES 100
  4871   #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
  4872   static int next_os_pool;
  4873   void *our_os_pools[MAX_POOL_ENTRIES];
  4874 
  4875   void *osMoreCore(int size)
  4876   {
  4877     void *ptr = 0;
  4878     static void *sbrk_top = 0;
  4879 
  4880     if (size > 0)
  4881     {
  4882       if (size < MINIMUM_MORECORE_SIZE)
  4883          size = MINIMUM_MORECORE_SIZE;
  4884       if (CurrentExecutionLevel() == kTaskLevel)
  4885          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
  4886       if (ptr == 0)
  4887       {
  4888         return (void *) MFAIL;
  4889       }
  4890       // save ptrs so they can be freed during cleanup
  4891       our_os_pools[next_os_pool] = ptr;
  4892       next_os_pool++;
  4893       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
  4894       sbrk_top = (char *) ptr + size;
  4895       return ptr;
  4896     }
  4897     else if (size < 0)
  4898     {
  4899       // we don't currently support shrink behavior
  4900       return (void *) MFAIL;
  4901     }
  4902     else
  4903     {
  4904       return sbrk_top;
  4905     }
  4906   }
  4907 
  4908   // cleanup any allocated memory pools
  4909   // called as last thing before shutting down driver
  4910 
  4911   void osCleanupMem(void)
  4912   {
  4913     void **ptr;
  4914 
  4915     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
  4916       if (*ptr)
  4917       {
  4918          PoolDeallocate(*ptr);
  4919          *ptr = 0;
  4920       }
  4921   }
  4922 
  4923 */
  4924 
  4925 
  4926 /* -----------------------------------------------------------------------
  4927 History:
  4928     V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
  4929       * Add max_footprint functions
  4930       * Ensure all appropriate literals are size_t
  4931       * Fix conditional compilation problem for some #define settings
  4932       * Avoid concatenating segments with the one provided
  4933         in create_mspace_with_base
  4934       * Rename some variables to avoid compiler shadowing warnings
  4935       * Use explicit lock initialization.
  4936       * Better handling of sbrk interference.
  4937       * Simplify and fix segment insertion, trimming and mspace_destroy
  4938       * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
  4939       * Thanks especially to Dennis Flanagan for help on these.
  4940 
  4941     V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
  4942       * Fix memalign brace error.
  4943 
  4944     V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
  4945       * Fix improper #endif nesting in C++
  4946       * Add explicit casts needed for C++
  4947 
  4948     V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
  4949       * Use trees for large bins
  4950       * Support mspaces
  4951       * Use segments to unify sbrk-based and mmap-based system allocation,
  4952         removing need for emulation on most platforms without sbrk.
  4953       * Default safety checks
  4954       * Optional footer checks. Thanks to William Robertson for the idea.
  4955       * Internal code refactoring
  4956       * Incorporate suggestions and platform-specific changes.
  4957         Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
  4958         Aaron Bachmann,  Emery Berger, and others.
  4959       * Speed up non-fastbin processing enough to remove fastbins.
  4960       * Remove useless cfree() to avoid conflicts with other apps.
  4961       * Remove internal memcpy, memset. Compilers handle builtins better.
  4962       * Remove some options that no one ever used and rename others.
  4963 
  4964     V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
  4965       * Fix malloc_state bitmap array misdeclaration
  4966 
  4967     V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
  4968       * Allow tuning of FIRST_SORTED_BIN_SIZE
  4969       * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
  4970       * Better detection and support for non-contiguousness of MORECORE.
  4971         Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
  4972       * Bypass most of malloc if no frees. Thanks to Emery Berger.
  4973       * Fix freeing of old top non-contiguous chunk in sysmalloc.
  4974       * Raised default trim and map thresholds to 256K.
  4975       * Fix mmap-related #defines. Thanks to Lubos Lunak.
  4976       * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
  4977       * Branch-free bin calculation
  4978       * Default trim and mmap thresholds now 256K.
  4979 
  4980     V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
  4981       * Introduce independent_comalloc and independent_calloc.
  4982         Thanks to Michael Pachos for motivation and help.
  4983       * Make optional .h file available
  4984       * Allow > 2GB requests on 32bit systems.
  4985       * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
  4986         Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
  4987         and Anonymous.
  4988       * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
  4989         helping test this.)
  4990       * memalign: check alignment arg
  4991       * realloc: don't try to shift chunks backwards, since this
  4992         leads to  more fragmentation in some programs and doesn't
  4993         seem to help in any others.
  4994       * Collect all cases in malloc requiring system memory into sysmalloc
  4995       * Use mmap as backup to sbrk
  4996       * Place all internal state in malloc_state
  4997       * Introduce fastbins (although similar to 2.5.1)
  4998       * Many minor tunings and cosmetic improvements
  4999       * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
  5000       * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
  5001         Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
  5002       * Include errno.h to support default failure action.
  5003 
  5004     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
  5005       * return null for negative arguments
  5006       * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
  5007          * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
  5008           (e.g. WIN32 platforms)
  5009          * Cleanup header file inclusion for WIN32 platforms
  5010          * Cleanup code to avoid Microsoft Visual C++ compiler complaints
  5011          * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
  5012            memory allocation routines
  5013          * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
  5014          * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
  5015            usage of 'assert' in non-WIN32 code
  5016          * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
  5017            avoid infinite loop
  5018       * Always call 'fREe()' rather than 'free()'
  5019 
  5020     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
  5021       * Fixed ordering problem with boundary-stamping
  5022 
  5023     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
  5024       * Added pvalloc, as recommended by H.J. Liu
  5025       * Added 64bit pointer support mainly from Wolfram Gloger
  5026       * Added anonymously donated WIN32 sbrk emulation
  5027       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
  5028       * malloc_extend_top: fix mask error that caused wastage after
  5029         foreign sbrks
  5030       * Add linux mremap support code from HJ Liu
  5031 
  5032     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
  5033       * Integrated most documentation with the code.
  5034       * Add support for mmap, with help from
  5035         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5036       * Use last_remainder in more cases.
  5037       * Pack bins using idea from  colin@nyx10.cs.du.edu
  5038       * Use ordered bins instead of best-fit threshold
  5039       * Eliminate block-local decls to simplify tracing and debugging.
  5040       * Support another case of realloc via move into top
  5041       * Fix error occurring when initial sbrk_base not word-aligned.
  5042       * Rely on page size for units instead of SBRK_UNIT to
  5043         avoid surprises about sbrk alignment conventions.
  5044       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
  5045         (raymond@es.ele.tue.nl) for the suggestion.
  5046       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
  5047       * More precautions for cases where other routines call sbrk,
  5048         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5049       * Added macros etc., allowing use in linux libc from
  5050         H.J. Lu (hjl@gnu.ai.mit.edu)
  5051       * Inverted this history list
  5052 
  5053     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
  5054       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
  5055       * Removed all preallocation code since under current scheme
  5056         the work required to undo bad preallocations exceeds
  5057         the work saved in good cases for most test programs.
  5058       * No longer use return list or unconsolidated bins since
  5059         no scheme using them consistently outperforms those that don't
  5060         given above changes.
  5061       * Use best fit for very large chunks to prevent some worst-cases.
  5062       * Added some support for debugging
  5063 
  5064     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
  5065       * Removed footers when chunks are in use. Thanks to
  5066         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
  5067 
  5068     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
  5069       * Added malloc_trim, with help from Wolfram Gloger
  5070         (wmglo@Dent.MED.Uni-Muenchen.DE).
  5071 
  5072     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
  5073 
  5074     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
  5075       * realloc: try to expand in both directions
  5076       * malloc: swap order of clean-bin strategy;
  5077       * realloc: only conditionally expand backwards
  5078       * Try not to scavenge used bins
  5079       * Use bin counts as a guide to preallocation
  5080       * Occasionally bin return list chunks in first scan
  5081       * Add a few optimizations from colin@nyx10.cs.du.edu
  5082 
  5083     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
  5084       * faster bin computation & slightly different binning
  5085       * merged all consolidations to one part of malloc proper
  5086          (eliminating old malloc_find_space & malloc_clean_bin)
  5087       * Scan 2 returns chunks (not just 1)
  5088       * Propagate failure in realloc if malloc returns 0
  5089       * Add stuff to allow compilation on non-ANSI compilers
  5090           from kpv@research.att.com
  5091 
  5092     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
  5093       * removed potential for odd address access in prev_chunk
  5094       * removed dependency on getpagesize.h
  5095       * misc cosmetics and a bit more internal documentation
  5096       * anticosmetics: mangled names in macros to evade debugger strangeness
  5097       * tested on sparc, hp-700, dec-mips, rs6000
  5098           with gcc & native cc (hp, dec only) allowing
  5099           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
  5100 
  5101     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
  5102       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
  5103          structure of old version,  but most details differ.)
  5104  
  5105 */
  5106 
  5107 #endif /* !HAVE_MALLOC */