src/stdlib/SDL_malloc.c
author Ryan C. Gordon <icculus@icculus.org>
Sat, 14 Jul 2007 06:45:01 +0000
branch SDL-1.2
changeset 4061 95ecf165f113
parent 1465 8dfa9a6d69a5
child 4063 d634945c6172
permissions -rw-r--r--
Mac OS X thinks we want a function call to SDL_memcpy() and not memcpy() here,
thanks to macro replacement issues.
     1 /*
     2     SDL - Simple DirectMedia Layer
     3     Copyright (C) 1997-2006 Sam Lantinga
     4 
     5     This library is free software; you can redistribute it and/or
     6     modify it under the terms of the GNU Lesser General Public
     7     License as published by the Free Software Foundation; either
     8     version 2.1 of the License, or (at your option) any later version.
     9 
    10     This library is distributed in the hope that it will be useful,
    11     but WITHOUT ANY WARRANTY; without even the implied warranty of
    12     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    13     Lesser General Public License for more details.
    14 
    15     You should have received a copy of the GNU Lesser General Public
    16     License along with this library; if not, write to the Free Software
    17     Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    18 
    19     Sam Lantinga
    20     slouken@libsdl.org
    21 */
    22 #include "SDL_config.h"
    23 
    24 /* This file contains portable memory management functions for SDL */
    25 
    26 #include "SDL_stdinc.h"
    27 
    28 #ifndef HAVE_MALLOC
    29 
    30 #define LACKS_SYS_TYPES_H
    31 #define LACKS_STDIO_H
    32 #define LACKS_STRINGS_H
    33 #define LACKS_STRING_H
    34 #define LACKS_STDLIB_H
    35 #define ABORT
    36 
    37 /*
    38   This is a version (aka dlmalloc) of malloc/free/realloc written by
    39   Doug Lea and released to the public domain, as explained at
    40   http://creativecommons.org/licenses/publicdomain.  Send questions,
    41   comments, complaints, performance data, etc to dl@cs.oswego.edu
    42 
    43 * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
    44 
    45    Note: There may be an updated version of this malloc obtainable at
    46            ftp://gee.cs.oswego.edu/pub/misc/malloc.c
    47          Check before installing!
    48 
    49 * Quickstart
    50 
    51   This library is all in one file to simplify the most common usage:
    52   ftp it, compile it (-O3), and link it into another program. All of
    53   the compile-time options default to reasonable values for use on
    54   most platforms.  You might later want to step through various
    55   compile-time and dynamic tuning options.
    56 
    57   For convenience, an include file for code using this malloc is at:
    58      ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
    59   You don't really need this .h file unless you call functions not
    60   defined in your system include files.  The .h file contains only the
    61   excerpts from this file needed for using this malloc on ANSI C/C++
    62   systems, so long as you haven't changed compile-time options about
    63   naming and tuning parameters.  If you do, then you can create your
    64   own malloc.h that does include all settings by cutting at the point
    65   indicated below. Note that you may already by default be using a C
    66   library containing a malloc that is based on some version of this
    67   malloc (for example in linux). You might still want to use the one
    68   in this file to customize settings or to avoid overheads associated
    69   with library versions.
    70 
    71 * Vital statistics:
    72 
    73   Supported pointer/size_t representation:       4 or 8 bytes
    74        size_t MUST be an unsigned type of the same width as
    75        pointers. (If you are using an ancient system that declares
    76        size_t as a signed type, or need it to be a different width
    77        than pointers, you can use a previous release of this malloc
    78        (e.g. 2.7.2) supporting these.)
    79 
    80   Alignment:                                     8 bytes (default)
    81        This suffices for nearly all current machines and C compilers.
    82        However, you can define MALLOC_ALIGNMENT to be wider than this
    83        if necessary (up to 128 bytes), at the expense of using more space.
    84 
    85   Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
    86                                           8 or 16 bytes (if 8byte sizes)
    87        Each malloced chunk has a hidden word of overhead holding size
    88        and status information, and additional cross-check word
    89        if FOOTERS is defined.
    90 
    91   Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
    92                           8-byte ptrs:  32 bytes    (including overhead)
    93 
    94        Even a request for zero bytes (i.e., malloc(0)) returns a
    95        pointer to something of the minimum allocatable size.
    96        The maximum overhead wastage (i.e., number of extra bytes
    97        allocated beyond what was requested in malloc) is less than or equal
    98        to the minimum size, except for requests >= mmap_threshold that
    99        are serviced via mmap(), where the worst case wastage is about
   100        32 bytes plus the remainder from a system page (the minimal
   101        mmap unit); typically 4096 or 8192 bytes.
   102 
   103   Security: static-safe; optionally more or less
   104        The "security" of malloc refers to the ability of malicious
   105        code to accentuate the effects of errors (for example, freeing
   106        space that is not currently malloc'ed or overwriting past the
   107        ends of chunks) in code that calls malloc.  This malloc
   108        guarantees not to modify any memory locations below the base of
   109        heap, i.e., static variables, even in the presence of usage
   110        errors.  The routines additionally detect most improper frees
   111        and reallocs.  All this holds as long as the static bookkeeping
   112        for malloc itself is not corrupted by some other means.  This
   113        is only one aspect of security -- these checks do not, and
   114        cannot, detect all possible programming errors.
   115 
   116        If FOOTERS is defined nonzero, then each allocated chunk
   117        carries an additional check word to verify that it was malloced
   118        from its space.  These check words are the same within each
   119        execution of a program using malloc, but differ across
   120        executions, so externally crafted fake chunks cannot be
   121        freed. This improves security by rejecting frees/reallocs that
   122        could corrupt heap memory, in addition to the checks preventing
   123        writes to statics that are always on.  This may further improve
   124        security at the expense of time and space overhead.  (Note that
   125        FOOTERS may also be worth using with MSPACES.)
   126 
   127        By default detected errors cause the program to abort (calling
   128        "abort()"). You can override this to instead proceed past
   129        errors by defining PROCEED_ON_ERROR.  In this case, a bad free
   130        has no effect, and a malloc that encounters a bad address
   131        caused by user overwrites will ignore the bad address by
   132        dropping pointers and indices to all known memory. This may
   133        be appropriate for programs that should continue if at all
   134        possible in the face of programming errors, although they may
   135        run out of memory because dropped memory is never reclaimed.
   136 
   137        If you don't like either of these options, you can define
   138        CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
   139        else. And if you are sure that your program using malloc has
   140        no errors or vulnerabilities, you can define INSECURE to 1,
   141        which might (or might not) provide a small performance improvement.
   142 
   143   Thread-safety: NOT thread-safe unless USE_LOCKS defined
   144        When USE_LOCKS is defined, each public call to malloc, free,
   145        etc is surrounded with either a pthread mutex or a win32
   146        spinlock (depending on WIN32). This is not especially fast, and
   147        can be a major bottleneck.  It is designed only to provide
   148        minimal protection in concurrent environments, and to provide a
   149        basis for extensions.  If you are using malloc in a concurrent
   150        program, consider instead using ptmalloc, which is derived from
   151        a version of this malloc. (See http://www.malloc.de).
   152 
   153   System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
   154        This malloc can use unix sbrk or any emulation (invoked using
   155        the CALL_MORECORE macro) and/or mmap/munmap or any emulation
   156        (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
   157        memory.  On most unix systems, it tends to work best if both
   158        MORECORE and MMAP are enabled.  On Win32, it uses emulations
   159        based on VirtualAlloc. It also uses common C library functions
   160        like memset.
   161 
   162   Compliance: I believe it is compliant with the Single Unix Specification
   163        (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
   164        others as well.
   165 
   166 * Overview of algorithms
   167 
   168   This is not the fastest, most space-conserving, most portable, or
   169   most tunable malloc ever written. However it is among the fastest
   170   while also being among the most space-conserving, portable and
   171   tunable.  Consistent balance across these factors results in a good
   172   general-purpose allocator for malloc-intensive programs.
   173 
   174   In most ways, this malloc is a best-fit allocator. Generally, it
   175   chooses the best-fitting existing chunk for a request, with ties
   176   broken in approximately least-recently-used order. (This strategy
   177   normally maintains low fragmentation.) However, for requests less
   178   than 256 bytes, it deviates from best-fit when there is not an
   179   exactly fitting available chunk by preferring to use space adjacent
   180   to that used for the previous small request, as well as by breaking
   181   ties in approximately most-recently-used order. (These enhance
   182   locality of series of small allocations.)  And for very large requests
   183   (>= 256Kb by default), it relies on system memory mapping
   184   facilities, if supported.  (This helps avoid carrying around and
   185   possibly fragmenting memory used only for large chunks.)
   186 
   187   All operations (except malloc_stats and mallinfo) have execution
   188   times that are bounded by a constant factor of the number of bits in
   189   a size_t, not counting any clearing in calloc or copying in realloc,
   190   or actions surrounding MORECORE and MMAP that have times
   191   proportional to the number of non-contiguous regions returned by
   192   system allocation routines, which is often just 1.
   193 
   194   The implementation is not very modular and seriously overuses
   195   macros. Perhaps someday all C compilers will do as good a job
   196   inlining modular code as can now be done by brute-force expansion,
   197   but now, enough of them seem not to.
   198 
   199   Some compilers issue a lot of warnings about code that is
   200   dead/unreachable only on some platforms, and also about intentional
   201   uses of negation on unsigned types. All known cases of each can be
   202   ignored.
   203 
   204   For a longer but out of date high-level description, see
   205      http://gee.cs.oswego.edu/dl/html/malloc.html
   206 
   207 * MSPACES
   208   If MSPACES is defined, then in addition to malloc, free, etc.,
   209   this file also defines mspace_malloc, mspace_free, etc. These
   210   are versions of malloc routines that take an "mspace" argument
   211   obtained using create_mspace, to control all internal bookkeeping.
   212   If ONLY_MSPACES is defined, only these versions are compiled.
   213   So if you would like to use this allocator for only some allocations,
   214   and your system malloc for others, you can compile with
   215   ONLY_MSPACES and then do something like...
   216     static mspace mymspace = create_mspace(0,0); // for example
   217     #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
   218 
   219   (Note: If you only need one instance of an mspace, you can instead
   220   use "USE_DL_PREFIX" to relabel the global malloc.)
   221 
   222   You can similarly create thread-local allocators by storing
   223   mspaces as thread-locals. For example:
   224     static __thread mspace tlms = 0;
   225     void*  tlmalloc(size_t bytes) {
   226       if (tlms == 0) tlms = create_mspace(0, 0);
   227       return mspace_malloc(tlms, bytes);
   228     }
   229     void  tlfree(void* mem) { mspace_free(tlms, mem); }
   230 
   231   Unless FOOTERS is defined, each mspace is completely independent.
   232   You cannot allocate from one and free to another (although
   233   conformance is only weakly checked, so usage errors are not always
   234   caught). If FOOTERS is defined, then each chunk carries around a tag
   235   indicating its originating mspace, and frees are directed to their
   236   originating spaces.
   237 
   238  -------------------------  Compile-time options ---------------------------
   239 
   240 Be careful in setting #define values for numerical constants of type
   241 size_t. On some systems, literal values are not automatically extended
   242 to size_t precision unless they are explicitly cast.
   243 
   244 WIN32                    default: defined if _WIN32 defined
   245   Defining WIN32 sets up defaults for MS environment and compilers.
   246   Otherwise defaults are for unix.
   247 
   248 MALLOC_ALIGNMENT         default: (size_t)8
   249   Controls the minimum alignment for malloc'ed chunks.  It must be a
   250   power of two and at least 8, even on machines for which smaller
   251   alignments would suffice. It may be defined as larger than this
   252   though. Note however that code and data structures are optimized for
   253   the case of 8-byte alignment.
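
  For example, a build that wants 16-byte alignment could compile this
  file with the following (an illustrative value, not a default used here):

    #define MALLOC_ALIGNMENT ((size_t)16U)

  which mirrors the shape of the default definition below, ((size_t)8U).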
   254 
   255 MSPACES                  default: 0 (false)
   256   If true, compile in support for independent allocation spaces.
   257   This is only supported if HAVE_MMAP is true.
   258 
   259 ONLY_MSPACES             default: 0 (false)
   260   If true, only compile in mspace versions, not regular versions.
   261 
   262 USE_LOCKS                default: 0 (false)
   263   Causes each call to each public routine to be surrounded with
   264   pthread or WIN32 mutex lock/unlock. (If set true, this can be
   265   overridden on a per-mspace basis for mspace versions.)
   266 
   267 FOOTERS                  default: 0
   268   If true, provide extra checking and dispatching by placing
   269   information in the footers of allocated chunks. This adds
   270   space and time overhead.
   271 
   272 INSECURE                 default: 0
   273   If true, omit checks for usage errors and heap space overwrites.
   274 
   275 USE_DL_PREFIX            default: NOT defined
   276   Causes compiler to prefix all public routines with the string 'dl'.
   277   This can be useful when you only want to use this malloc in one part
   278   of a program, using your regular system malloc elsewhere.
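
  A minimal usage sketch, assuming this file is compiled with USE_DL_PREFIX
  defined (the dl-prefixed routines are declared below):

    void* p = dlmalloc(100);   // served by this allocator
    void* q = malloc(100);     // served by the system's malloc
    dlfree(p);
    free(q);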
   279 
   280 ABORT                    default: defined as abort()
   281   Defines how to abort on failed checks.  On most systems, a failed
   282   check cannot die with an "assert" or even print an informative
   283   message, because the underlying print routines in turn call malloc,
   284   which will fail again.  Generally, the best policy is to simply call
   285   abort(). It's not very useful to do more than this because many
   286   errors due to overwriting will show up as address faults (null, odd
   287   addresses etc) rather than malloc-triggered checks, so will also
   288   abort.  Also, most compilers know that abort() does not return, so
   289   can better optimize code conditionally calling it.
   290 
   291 PROCEED_ON_ERROR           default: defined as 0 (false)
   292   Controls whether detected bad addresses cause them to be bypassed
   293   rather than aborting. If set, detected bad arguments to free and
   294   realloc are ignored. And all bookkeeping information is zeroed out
   295   upon a detected overwrite of freed heap space, thus losing the
   296   ability to ever return it from malloc again, but enabling the
   297   application to proceed. If PROCEED_ON_ERROR is defined, the
   298   static variable malloc_corruption_error_count is compiled in
   299   and can be examined to see if errors have occurred. This option
   300   generates slower code than the default abort policy.
   301 
   302 DEBUG                    default: NOT defined
   303   The DEBUG setting is mainly intended for people trying to modify
   304   this code or diagnose problems when porting to new platforms.
   305   However, it may also be able to better isolate user errors than just
   306   using runtime checks.  The assertions in the check routines spell
   307   out in more detail the assumptions and invariants underlying the
   308   algorithms.  The checking is fairly extensive, and will slow down
   309   execution noticeably. Calling malloc_stats or mallinfo with DEBUG
   310   set will attempt to check every non-mmapped allocated and free chunk
   311   in the course of computing the summaries.
   312 
   313 ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
   314   Debugging assertion failures can be nearly impossible if your
   315   version of the assert macro causes malloc to be called, which will
   316   lead to a cascade of further failures, blowing the runtime stack.
   317   ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
   318   which will usually make debugging easier.
   319 
   320 MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
   321   The action to take before "return 0" when malloc is unable to
   322   return memory because there is none available.
   323 
   324 HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
   325   True if this system supports sbrk or an emulation of it.
   326 
   327 MORECORE                  default: sbrk
   328   The name of the sbrk-style system routine to call to obtain more
   329   memory.  See below for guidance on writing custom MORECORE
   330   functions. The type of the argument to sbrk/MORECORE varies across
   331   systems.  It cannot be size_t, because it supports negative
   332   arguments, so it is normally the signed type of the same width as
   333   size_t (sometimes declared as "intptr_t").  It doesn't much matter
   334   though. Internally, we only call it with arguments less than half
   335   the max value of a size_t, which should work across all reasonable
   336   possibilities, although sometimes generating compiler warnings.  See
   337   near the end of this file for guidelines for creating a custom
   338   version of MORECORE.
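
  As a minimal sketch of the kind of custom MORECORE discussed near the
  end of this file, the hypothetical function below (names are purely
  illustrative) serves requests from one static arena; since it never
  releases memory, it would be paired with MORECORE_CANNOT_TRIM:

    static char my_arena[4 * 1024 * 1024];
    static size_t my_arena_used = 0;

    void* my_morecore(ptrdiff_t increment) {
      char* prev = my_arena + my_arena_used;
      if (increment == 0)
        return prev;                              // report current "break"
      if (increment > 0 &&
          (size_t)increment <= sizeof(my_arena) - my_arena_used) {
        my_arena_used += (size_t)increment;
        return prev;                              // old break, like sbrk
      }
      return (void*)-1;                           // failure, as sbrk reports it
    }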
   339 
   340 MORECORE_CONTIGUOUS       default: 1 (true)
   341   If true, take advantage of fact that consecutive calls to MORECORE
   342   with positive arguments always return contiguous increasing
   343   addresses.  This is true of unix sbrk. It does not hurt too much to
   344   set it true anyway, since malloc copes with non-contiguities.
   345   Setting it false when MORECORE is definitely non-contiguous, though,
   346   saves the time and space it would otherwise take to discover this.
   347 
   348 MORECORE_CANNOT_TRIM      default: NOT defined
   349   True if MORECORE cannot release space back to the system when given
   350   negative arguments. This is generally necessary only if you are
   351   using a hand-crafted MORECORE function that cannot handle negative
   352   arguments.
   353 
   354 HAVE_MMAP                 default: 1 (true)
   355   True if this system supports mmap or an emulation of it.  If so, and
   356   HAVE_MORECORE is not true, MMAP is used for all system
   357   allocation. If set and HAVE_MORECORE is true as well, MMAP is
   358   primarily used to directly allocate very large blocks. It is also
   359   used as a backup strategy in cases where MORECORE fails to provide
   360   space from system. Note: A single call to MUNMAP is assumed to be
   361   able to unmap memory that may have been allocated using multiple calls
   362   to MMAP, so long as they are adjacent.
   363 
   364 HAVE_MREMAP               default: 1 on linux, else 0
   365   If true realloc() uses mremap() to re-allocate large blocks and
   366   extend or shrink allocation spaces.
   367 
   368 MMAP_CLEARS               default: 1 on unix
   369   True if mmap clears memory so calloc doesn't need to. This is true
   370   for standard unix mmap using /dev/zero.
   371 
   372 USE_BUILTIN_FFS            default: 0 (i.e., not used)
   373   Causes malloc to use the builtin ffs() function to compute indices.
   374   Some compilers may recognize and intrinsify ffs to be faster than the
   375   supplied C version. Also, the case of x86 using gcc is special-cased
   376   to an asm instruction, so is already as fast as it can be, and so
   377   this setting has no effect. (On most x86s, the asm version is only
   378   slightly faster than the C version.)
   379 
   380 malloc_getpagesize         default: derive from system includes, or 4096.
   381   The system page size. To the extent possible, this malloc manages
   382   memory from the system in page-size units.  This may be (and
   383   usually is) a function rather than a constant. This is ignored
   384   if WIN32, where page size is determined using getSystemInfo during
   385   initialization.
   386 
   387 USE_DEV_RANDOM             default: 0 (i.e., not used)
   388   Causes malloc to use /dev/random to initialize secure magic seed for
   389   stamping footers. Otherwise, the current time is used.
   390 
   391 NO_MALLINFO                default: 0
   392   If defined, don't compile "mallinfo". This can be a simple way
   393   of dealing with mismatches between system declarations and
   394   those in this file.
   395 
   396 MALLINFO_FIELD_TYPE        default: size_t
   397   The type of the fields in the mallinfo struct. This was originally
   398   defined as "int" in SVID etc, but is more usefully defined as
   399   size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
   400 
   401 REALLOC_ZERO_BYTES_FREES    default: not defined
   402   This should be set if a call to realloc with zero bytes should
   403   be the same as a call to free. Some people think it should. Otherwise,
   404   because this malloc returns a unique pointer for malloc(0),
   405   realloc(p, 0) does too.
   406 
   407 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
   408 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
   409 LACKS_STDLIB_H                default: NOT defined unless on WIN32
   410   Define these if your system does not have these header files.
   411   You might need to manually insert some of the declarations they provide.
   412 
   413 DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
   414                                 system_info.dwAllocationGranularity in WIN32,
   415                                 otherwise 64K.
   416       Also settable using mallopt(M_GRANULARITY, x)
   417   The unit for allocating and deallocating memory from the system.  On
   418   most systems with contiguous MORECORE, there is no reason to
   419   make this more than a page. However, systems with MMAP tend to
   420   either require or encourage larger granularities.  You can increase
   421   this value to prevent system allocation functions from being called so
   422   often, especially if they are slow.  The value must be at least one
   423   page and must be a power of two.  Setting to 0 causes initialization
   424   to either page size or win32 region size.  (Note: In previous
   425   versions of malloc, the equivalent of this option was called
   426   "TOP_PAD")
   427 
   428 DEFAULT_TRIM_THRESHOLD    default: 2MB
   429       Also settable using mallopt(M_TRIM_THRESHOLD, x)
   430   The maximum amount of unused top-most memory to keep before
   431   releasing via malloc_trim in free().  Automatic trimming is mainly
   432   useful in long-lived programs using contiguous MORECORE.  Because
   433   trimming via sbrk can be slow on some systems, and can sometimes be
   434   wasteful (in cases where programs immediately afterward allocate
   435   more large chunks) the value should be high enough so that your
   436   overall system performance would improve by releasing this much
   437   memory.  As a rough guide, you might set to a value close to the
   438   average size of a process (program) running on your system.
   439   Releasing this much memory would allow such a process to run in
   440   memory.  Generally, it is worth tuning trim thresholds when a
   441   program undergoes phases where several large chunks are allocated
   442   and released in ways that can reuse each other's storage, perhaps
   443   mixed with phases where there are no such chunks at all. The trim
   444   value must be greater than page size to have any useful effect.  To
   445   disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
   446   some people use of mallocing a huge space and then freeing it at
   447   program startup, in an attempt to reserve system memory, doesn't
   448   have the intended effect under automatic trimming, since that memory
   449   will immediately be returned to the system.
   450 
   451 DEFAULT_MMAP_THRESHOLD       default: 256K
   452       Also settable using mallopt(M_MMAP_THRESHOLD, x)
   453   The request size threshold for using MMAP to directly service a
   454   request. Requests of at least this size that cannot be allocated
   455   using already-existing space will be serviced via mmap.  (If enough
   456   normal freed space already exists it is used instead.)  Using mmap
   457   segregates relatively large chunks of memory so that they can be
   458   individually obtained and released from the host system. A request
   459   serviced through mmap is never reused by any other request (at least
   460   not directly; the system may just so happen to remap successive
   461   requests to the same locations).  Segregating space in this way has
   462   the benefits that: Mmapped space can always be individually released
   463   back to the system, which helps keep the system level memory demands
   464   of a long-lived program low.  Also, mapped memory doesn't become
   465   `locked' between other chunks, as can happen with normally allocated
   466   chunks, which means that even trimming via malloc_trim would not
   467   release them.  However, it has the disadvantage that the space
   468   cannot be reclaimed, consolidated, and then used to service later
   469   requests, as happens with normal chunks.  The advantages of mmap
   470   nearly always outweigh disadvantages for "large" chunks, but the
   471   value of "large" may vary across systems.  The default is an
   472   empirically derived value that works well in most systems. You can
   473   disable mmap by setting to MAX_SIZE_T.
   474 
   475 */
   476 
   477 #ifndef WIN32
   478 #ifdef _WIN32
   479 #define WIN32 1
   480 #endif  /* _WIN32 */
   481 #endif  /* WIN32 */
   482 #ifdef WIN32
   483 #define WIN32_LEAN_AND_MEAN
   484 #include <windows.h>
   485 #define HAVE_MMAP 1
   486 #define HAVE_MORECORE 0
   487 #define LACKS_UNISTD_H
   488 #define LACKS_SYS_PARAM_H
   489 #define LACKS_SYS_MMAN_H
   490 #define LACKS_STRING_H
   491 #define LACKS_STRINGS_H
   492 #define LACKS_SYS_TYPES_H
   493 #define LACKS_ERRNO_H
   494 #define LACKS_FCNTL_H 
   495 #define MALLOC_FAILURE_ACTION
   496 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
   497 #endif  /* WIN32 */
   498 
   499 #if defined(DARWIN) || defined(_DARWIN)
   500 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
   501 #ifndef HAVE_MORECORE
   502 #define HAVE_MORECORE 0
   503 #define HAVE_MMAP 1
   504 #endif  /* HAVE_MORECORE */
   505 #endif  /* DARWIN */
   506 
   507 #ifndef LACKS_SYS_TYPES_H
   508 #include <sys/types.h>  /* For size_t */
   509 #endif  /* LACKS_SYS_TYPES_H */
   510 
   511 /* The maximum possible size_t value has all bits set */
   512 #define MAX_SIZE_T           (~(size_t)0)
   513 
   514 #ifndef ONLY_MSPACES
   515 #define ONLY_MSPACES 0
   516 #endif  /* ONLY_MSPACES */
   517 #ifndef MSPACES
   518 #if ONLY_MSPACES
   519 #define MSPACES 1
   520 #else   /* ONLY_MSPACES */
   521 #define MSPACES 0
   522 #endif  /* ONLY_MSPACES */
   523 #endif  /* MSPACES */
   524 #ifndef MALLOC_ALIGNMENT
   525 #define MALLOC_ALIGNMENT ((size_t)8U)
   526 #endif  /* MALLOC_ALIGNMENT */
   527 #ifndef FOOTERS
   528 #define FOOTERS 0
   529 #endif  /* FOOTERS */
   530 #ifndef ABORT
   531 #define ABORT  abort()
   532 #endif  /* ABORT */
   533 #ifndef ABORT_ON_ASSERT_FAILURE
   534 #define ABORT_ON_ASSERT_FAILURE 1
   535 #endif  /* ABORT_ON_ASSERT_FAILURE */
   536 #ifndef PROCEED_ON_ERROR
   537 #define PROCEED_ON_ERROR 0
   538 #endif  /* PROCEED_ON_ERROR */
   539 #ifndef USE_LOCKS
   540 #define USE_LOCKS 0
   541 #endif  /* USE_LOCKS */
   542 #ifndef INSECURE
   543 #define INSECURE 0
   544 #endif  /* INSECURE */
   545 #ifndef HAVE_MMAP
   546 #define HAVE_MMAP 1
   547 #endif  /* HAVE_MMAP */
   548 #ifndef MMAP_CLEARS
   549 #define MMAP_CLEARS 1
   550 #endif  /* MMAP_CLEARS */
   551 #ifndef HAVE_MREMAP
   552 #ifdef linux
   553 #define HAVE_MREMAP 1
   554 #else   /* linux */
   555 #define HAVE_MREMAP 0
   556 #endif  /* linux */
   557 #endif  /* HAVE_MREMAP */
   558 #ifndef MALLOC_FAILURE_ACTION
   559 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
   560 #endif  /* MALLOC_FAILURE_ACTION */
   561 #ifndef HAVE_MORECORE
   562 #if ONLY_MSPACES
   563 #define HAVE_MORECORE 0
   564 #else   /* ONLY_MSPACES */
   565 #define HAVE_MORECORE 1
   566 #endif  /* ONLY_MSPACES */
   567 #endif  /* HAVE_MORECORE */
   568 #if !HAVE_MORECORE
   569 #define MORECORE_CONTIGUOUS 0
   570 #else   /* !HAVE_MORECORE */
   571 #ifndef MORECORE
   572 #define MORECORE sbrk
   573 #endif  /* MORECORE */
   574 #ifndef MORECORE_CONTIGUOUS
   575 #define MORECORE_CONTIGUOUS 1
   576 #endif  /* MORECORE_CONTIGUOUS */
   577 #endif  /* HAVE_MORECORE */
   578 #ifndef DEFAULT_GRANULARITY
   579 #if MORECORE_CONTIGUOUS
   580 #define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
   581 #else   /* MORECORE_CONTIGUOUS */
   582 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
   583 #endif  /* MORECORE_CONTIGUOUS */
   584 #endif  /* DEFAULT_GRANULARITY */
   585 #ifndef DEFAULT_TRIM_THRESHOLD
   586 #ifndef MORECORE_CANNOT_TRIM
   587 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
   588 #else   /* MORECORE_CANNOT_TRIM */
   589 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
   590 #endif  /* MORECORE_CANNOT_TRIM */
   591 #endif  /* DEFAULT_TRIM_THRESHOLD */
   592 #ifndef DEFAULT_MMAP_THRESHOLD
   593 #if HAVE_MMAP
   594 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
   595 #else   /* HAVE_MMAP */
   596 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
   597 #endif  /* HAVE_MMAP */
   598 #endif  /* DEFAULT_MMAP_THRESHOLD */
   599 #ifndef USE_BUILTIN_FFS
   600 #define USE_BUILTIN_FFS 0
   601 #endif  /* USE_BUILTIN_FFS */
   602 #ifndef USE_DEV_RANDOM
   603 #define USE_DEV_RANDOM 0
   604 #endif  /* USE_DEV_RANDOM */
   605 #ifndef NO_MALLINFO
   606 #define NO_MALLINFO 0
   607 #endif  /* NO_MALLINFO */
   608 #ifndef MALLINFO_FIELD_TYPE
   609 #define MALLINFO_FIELD_TYPE size_t
   610 #endif  /* MALLINFO_FIELD_TYPE */
   611 
   612 #define memset	SDL_memset
   613 #define malloc	SDL_malloc
   614 #define calloc	SDL_calloc
   615 #define realloc	SDL_realloc
   616 #define free	SDL_free
   617 
   618 /*
   619   mallopt tuning options.  SVID/XPG defines four standard parameter
   620   numbers for mallopt, normally defined in malloc.h.  None of these
   621   are used in this malloc, so setting them has no effect. But this
   622   malloc does support the following options.
   623 */
   624 
   625 #define M_TRIM_THRESHOLD     (-1)
   626 #define M_GRANULARITY        (-2)
   627 #define M_MMAP_THRESHOLD     (-3)
   628 
   629 /* ------------------------ Mallinfo declarations ------------------------ */
   630 
   631 #if !NO_MALLINFO
   632 /*
   633   This version of malloc supports the standard SVID/XPG mallinfo
   634   routine that returns a struct containing usage properties and
   635   statistics. It should work on any system that has a
   636   /usr/include/malloc.h defining struct mallinfo.  The main
   637   declaration needed is the mallinfo struct that is returned (by-copy)
   638   by mallinfo().  The mallinfo struct contains a bunch of fields that
   639   are not even meaningful in this version of malloc.  These fields
   640   are instead filled by mallinfo() with other numbers that might be of
   641   interest.
   642 
   643   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
   644   /usr/include/malloc.h file that includes a declaration of struct
   645   mallinfo.  If so, it is included; else a compliant version is
   646   declared below.  These must be precisely the same for mallinfo() to
   647   work.  The original SVID version of this struct, defined on most
   648   systems with mallinfo, declares all fields as ints. But some others
   649   define them as unsigned long. If your system defines the fields using a
   650   type of different width than listed here, you MUST #include your
   651   system version and #define HAVE_USR_INCLUDE_MALLOC_H.
   652 */
   653 
   654 /* #define HAVE_USR_INCLUDE_MALLOC_H */
   655 
   656 #ifdef HAVE_USR_INCLUDE_MALLOC_H
   657 #include "/usr/include/malloc.h"
   658 #else /* HAVE_USR_INCLUDE_MALLOC_H */
   659 
   660 struct mallinfo {
   661   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
   662   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
   663   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
   664   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
   665   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
   666   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
   667   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
   668   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
   669   MALLINFO_FIELD_TYPE fordblks; /* total free space */
   670   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
   671 };
   672 
   673 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
   674 #endif /* NO_MALLINFO */
   675 
   676 #ifdef __cplusplus
   677 extern "C" {
   678 #endif /* __cplusplus */
   679 
   680 #if !ONLY_MSPACES
   681 
   682 /* ------------------- Declarations of public routines ------------------- */
   683 
   684 #ifndef USE_DL_PREFIX
   685 #define dlcalloc               calloc
   686 #define dlfree                 free
   687 #define dlmalloc               malloc
   688 #define dlmemalign             memalign
   689 #define dlrealloc              realloc
   690 #define dlvalloc               valloc
   691 #define dlpvalloc              pvalloc
   692 #define dlmallinfo             mallinfo
   693 #define dlmallopt              mallopt
   694 #define dlmalloc_trim          malloc_trim
   695 #define dlmalloc_stats         malloc_stats
   696 #define dlmalloc_usable_size   malloc_usable_size
   697 #define dlmalloc_footprint     malloc_footprint
   698 #define dlmalloc_max_footprint malloc_max_footprint
   699 #define dlindependent_calloc   independent_calloc
   700 #define dlindependent_comalloc independent_comalloc
   701 #endif /* USE_DL_PREFIX */
   702 
   703 
   704 /*
   705   malloc(size_t n)
   706   Returns a pointer to a newly allocated chunk of at least n bytes, or
   707   null if no space is available, in which case errno is set to ENOMEM
   708   on ANSI C systems.
   709 
   710   If n is zero, malloc returns a minimum-sized chunk. (The minimum
   711   size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
   712   systems.)  Note that size_t is an unsigned type, so calls with
   713   arguments that would be negative if signed are interpreted as
   714   requests for huge amounts of space, which will often fail. The
   715   maximum supported value of n differs across systems, but is in all
   716   cases less than the maximum representable value of a size_t.
   717 */
   718 void* dlmalloc(size_t);
   719 
   720 /*
   721   free(void* p)
   722   Releases the chunk of memory pointed to by p, that had been previously
   723   allocated using malloc or a related routine such as realloc.
   724   It has no effect if p is null. If p was not malloced or already
   725   freed, free(p) will by default cause the current program to abort.
   726 */
   727 void  dlfree(void*);
   728 
   729 /*
   730   calloc(size_t n_elements, size_t element_size);
   731   Returns a pointer to n_elements * element_size bytes, with all locations
   732   set to zero.
   733 */
   734 void* dlcalloc(size_t, size_t);
   735 
   736 /*
   737   realloc(void* p, size_t n)
   738   Returns a pointer to a chunk of size n that contains the same data
   739   as does chunk p up to the minimum of (n, p's size) bytes, or null
   740   if no space is available.
   741 
   742   The returned pointer may or may not be the same as p. The algorithm
   743   prefers extending p in most cases when possible, otherwise it
   744   employs the equivalent of a malloc-copy-free sequence.
   745 
   746   If p is null, realloc is equivalent to malloc.
   747 
   748   If space is not available, realloc returns null, errno is set (if on
   749   ANSI) and p is NOT freed.
   750 
   751   if n is for fewer bytes than already held by p, the newly unused
   752   space is lopped off and freed if possible.  realloc with a size
   753   argument of zero (re)allocates a minimum-sized chunk.
   754 
   755   The old unix realloc convention of allowing the last-free'd chunk
   756   to be used as an argument to realloc is not supported.
   757 */
   758 
   759 void* dlrealloc(void*, size_t);
   760 
   761 /*
   762   memalign(size_t alignment, size_t n);
   763   Returns a pointer to a newly allocated chunk of n bytes, aligned
   764   in accord with the alignment argument.
   765 
   766   The alignment argument should be a power of two. If the argument is
   767   not a power of two, the nearest greater power is used.
   768   8-byte alignment is guaranteed by normal malloc calls, so don't
   769   bother calling memalign with an argument of 8 or less.
   770 
   771   Overreliance on memalign is a sure way to fragment space.
   772 */
   773 void* dlmemalign(size_t, size_t);
   774 
   775 /*
   776   valloc(size_t n);
   777   Equivalent to memalign(pagesize, n), where pagesize is the page
   778   size of the system. If the pagesize is unknown, 4096 is used.
   779 */
   780 void* dlvalloc(size_t);
   781 
   782 /*
   783   mallopt(int parameter_number, int parameter_value)
   784   Sets tunable parameters. The format is to provide a
   785   (parameter-number, parameter-value) pair.  mallopt then sets the
   786   corresponding parameter to the argument value if it can (i.e., so
   787   long as the value is meaningful), and returns 1 if successful else
   788   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
   789   normally defined in malloc.h.  None of these are used in this malloc,
   790   so setting them has no effect. But this malloc also supports other
   791   options in mallopt. See below for details.  Briefly, supported
   792   parameters are as follows (listed defaults are for "typical"
   793   configurations).
   794 
   795   Symbol            param #  default    allowed param values
   796   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
   797   M_GRANULARITY        -2     page size   any power of 2 >= page size
   798   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
   799 */
   800 int dlmallopt(int, int);
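
/*
  A minimal usage sketch (illustrative): raise the mmap threshold at runtime
  so requests below 1MB keep being served from the normal pool:

    if (dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024) == 0)
      handle_rejected_value();   // hypothetical error handler; mallopt
                                 // returns 0 when a value is not accepted
*/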
   801 
   802 /*
   803   malloc_footprint();
   804   Returns the number of bytes obtained from the system.  The total
   805   number of bytes allocated by malloc, realloc etc., is less than this
   806   value. Unlike mallinfo, this function returns only a precomputed
   807   result, so can be called frequently to monitor memory consumption.
   808   Even if locks are otherwise defined, this function does not use them,
   809   so results might not be up to date.
   810 */
   811 size_t dlmalloc_footprint(void);
   812 
   813 /*
   814   malloc_max_footprint();
   815   Returns the maximum number of bytes obtained from the system. This
   816   value will be greater than current footprint if deallocated space
   817   has been reclaimed by the system. The peak number of bytes allocated
   818   by malloc, realloc etc., is less than this value. Unlike mallinfo,
   819   this function returns only a precomputed result, so can be called
   820   frequently to monitor memory consumption.  Even if locks are
   821   otherwise defined, this function does not use them, so results might
   822   not be up to date.
   823 */
   824 size_t dlmalloc_max_footprint(void);
   825 
   826 #if !NO_MALLINFO
   827 /*
   828   mallinfo()
   829   Returns (by copy) a struct containing various summary statistics:
   830 
   831   arena:     current total non-mmapped bytes allocated from system
   832   ordblks:   the number of free chunks
   833   smblks:    always zero.
   834   hblks:     current number of mmapped regions
   835   hblkhd:    total bytes held in mmapped regions
   836   usmblks:   the maximum total allocated space. This will be greater
   837                 than current total if trimming has occurred.
   838   fsmblks:   always zero
   839   uordblks:  current total allocated space (normal or mmapped)
   840   fordblks:  total free space
   841   keepcost:  the maximum number of bytes that could ideally be released
   842                back to system via malloc_trim. ("ideally" means that
   843                it ignores page restrictions etc.)
   844 
   845   Because these fields are ints, but internal bookkeeping may
   846   be kept as longs, the reported values may wrap around zero and
   847   thus be inaccurate.
   848 */
   849 struct mallinfo dlmallinfo(void);
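
/*
  A minimal usage sketch (illustrative): take a snapshot of the statistics
  described above:

    struct mallinfo mi = dlmallinfo();
    // mi.uordblks: current total allocated space
    // mi.fordblks: total free space
    // mi.keepcost: bytes releasable via malloc_trim
*/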
   850 #endif /* NO_MALLINFO */
   851 
   852 /*
   853   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
   854 
   855   independent_calloc is similar to calloc, but instead of returning a
   856   single cleared space, it returns an array of pointers to n_elements
   857   independent elements that can hold contents of size elem_size, each
   858   of which starts out cleared, and can be independently freed,
   859   realloc'ed etc. The elements are guaranteed to be adjacently
   860   allocated (this is not guaranteed to occur with multiple callocs or
   861   mallocs), which may also improve cache locality in some
   862   applications.
   863 
   864   The "chunks" argument is optional (i.e., may be null, which is
   865   probably the most typical usage). If it is null, the returned array
   866   is itself dynamically allocated and should also be freed when it is
   867   no longer needed. Otherwise, the chunks array must be of at least
   868   n_elements in length. It is filled in with the pointers to the
   869   chunks.
   870 
   871   In either case, independent_calloc returns this pointer array, or
   872   null if the allocation failed.  If n_elements is zero and "chunks"
   873   is null, it returns a chunk representing an array with zero elements
   874   (which should be freed if not wanted).
   875 
   876   Each element must be individually freed when it is no longer
   877   needed. If you'd like to instead be able to free all at once, you
   878   should instead use regular calloc and assign pointers into this
   879   space to represent elements.  (In this case though, you cannot
   880   independently free elements.)
   881 
   882   independent_calloc simplifies and speeds up implementations of many
   883   kinds of pools.  It may also be useful when constructing large data
   884   structures that initially have a fixed number of fixed-sized nodes,
   885   but the number is not known at compile time, and some of the nodes
   886   may later need to be freed. For example:
   887 
   888   struct Node { int item; struct Node* next; };
   889 
   890   struct Node* build_list() {
   891     struct Node** pool; int i;
   892     int n = read_number_of_nodes_needed();
   893     if (n <= 0) return 0;
   894     pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
   895     if (pool == 0) die();
   896     // organize into a linked list...
   897     struct Node* first = pool[0];
   898     for (i = 0; i < n-1; ++i)
   899       pool[i]->next = pool[i+1];
   900     free(pool);     // Can now free the array (or not, if it is needed later)
   901     return first;
   902   }
   903 */
   904 void** dlindependent_calloc(size_t, size_t, void**);
   905 
   906 /*
   907   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   908 
   909   independent_comalloc allocates, all at once, a set of n_elements
   910   chunks with sizes indicated in the "sizes" array.    It returns
   911   an array of pointers to these elements, each of which can be
   912   independently freed, realloc'ed etc. The elements are guaranteed to
   913   be adjacently allocated (this is not guaranteed to occur with
   914   multiple callocs or mallocs), which may also improve cache locality
   915   in some applications.
   916 
   917   The "chunks" argument is optional (i.e., may be null). If it is null
   918   the returned array is itself dynamically allocated and should also
   919   be freed when it is no longer needed. Otherwise, the chunks array
   920   must be of at least n_elements in length. It is filled in with the
   921   pointers to the chunks.
   922 
   923   In either case, independent_comalloc returns this pointer array, or
   924   null if the allocation failed.  If n_elements is zero and chunks is
   925   null, it returns a chunk representing an array with zero elements
   926   (which should be freed if not wanted).
   927 
   928   Each element must be individually freed when it is no longer
   929   needed. If you'd like to instead be able to free all at once, you
   930   should instead use a single regular malloc, and assign pointers at
   931   particular offsets in the aggregate space. (In this case though, you
   932   cannot independently free elements.)
   933 
   934   independent_comalloc differs from independent_calloc in that each
   935   element may have a different size, and also that it does not
   936   automatically clear elements.
   937 
   938   independent_comalloc can be used to speed up allocation in cases
   939   where several structs or objects must always be allocated at the
   940   same time.  For example:
   941 
   942   struct Head { ... }
   943   struct Foot { ... }
   944 
   945   void send_message(char* msg) {
   946     int msglen = strlen(msg);
   947     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
   948     void* chunks[3];
   949     if (independent_comalloc(3, sizes, chunks) == 0)
   950       die();
   951     struct Head* head = (struct Head*)(chunks[0]);
   952     char*        body = (char*)(chunks[1]);
   953     struct Foot* foot = (struct Foot*)(chunks[2]);
   954     // ...
   955   }
   956 
   957   In general though, independent_comalloc is worth using only for
   958   larger values of n_elements. For small values, you probably won't
   959   detect enough difference from series of malloc calls to bother.
   960 
   961   Overuse of independent_comalloc can increase overall memory usage,
   962   since it cannot reuse existing noncontiguous small chunks that
   963   might be available for some of the elements.
   964 */
   965 void** dlindependent_comalloc(size_t, size_t*, void**);
   966 
   967 
   968 /*
   969   pvalloc(size_t n);
   970   Equivalent to valloc(minimum-page-that-holds(n)), that is,
   971   round up n to nearest pagesize.
   972  */
   973 void*  dlpvalloc(size_t);
   974 
   975 /*
   976   malloc_trim(size_t pad);
   977 
   978   If possible, gives memory back to the system (via negative arguments
   979   to sbrk) if there is unused memory at the `high' end of the malloc
   980   pool or in unused MMAP segments. You can call this after freeing
   981   large blocks of memory to potentially reduce the system-level memory
   982   requirements of a program. However, it cannot guarantee to reduce
   983   memory. Under some allocation patterns, some large free blocks of
   984   memory will be locked between two used chunks, so they cannot be
   985   given back to the system.
   986 
   987   The `pad' argument to malloc_trim represents the amount of free
   988   trailing space to leave untrimmed. If this argument is zero, only
   989   the minimum amount of memory to maintain internal data structures
   990   will be left. Non-zero arguments can be supplied to maintain enough
   991   trailing space to service future expected allocations without having
   992   to re-obtain memory from the system.
   993 
   994   Malloc_trim returns 1 if it actually released any memory, else 0.
   995 */
   996 int  dlmalloc_trim(size_t);
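
/*
  A minimal usage sketch (illustrative): after freeing a large batch of
  allocations, hand unused trailing memory back to the system while keeping
  roughly 1MB of slack for future requests:

    int released = dlmalloc_trim((size_t)1024U * 1024U);
    // released is 1 if any memory was actually returned, else 0
*/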
   997 
   998 /*
   999   malloc_usable_size(void* p);
  1000 
  1001   Returns the number of bytes you can actually use in
  1002   an allocated chunk, which may be more than you requested (although
  1003   often not) due to alignment and minimum size constraints.
  1004   You can use this many bytes without worrying about
  1005   overwriting other allocated objects. This is not a particularly great
  1006   programming practice. malloc_usable_size can be more useful in
  1007   debugging and assertions, for example:
  1008 
  1009   p = malloc(n);
  1010   assert(malloc_usable_size(p) >= 256);
  1011 */
  1012 size_t dlmalloc_usable_size(void*);
  1013 
  1014 /*
  1015   malloc_stats();
  1016   Prints on stderr the amount of space obtained from the system (both
  1017   via sbrk and mmap), the maximum amount (which may be more than
  1018   current if malloc_trim and/or munmap got called), and the current
  1019   number of bytes allocated via malloc (or realloc, etc) but not yet
  1020   freed. Note that this is the number of bytes allocated, not the
  1021   number requested. It will be larger than the number requested
  1022   because of alignment and bookkeeping overhead. Because it includes
  1023   alignment wastage as being in use, this figure may be greater than
  1024   zero even when no user-level chunks are allocated.
  1025 
  1026   The reported current and maximum system memory can be inaccurate if
  1027   a program makes other calls to system memory allocation functions
  1028   (normally sbrk) outside of malloc.
  1029 
  1030   malloc_stats prints only the most commonly interesting statistics.
  1031   More information can be obtained by calling mallinfo.
  1032 */
  1033 void  dlmalloc_stats(void);
  1034 
  1035 #endif /* ONLY_MSPACES */
  1036 
  1037 #if MSPACES
  1038 
  1039 /*
  1040   mspace is an opaque type representing an independent
  1041   region of space that supports mspace_malloc, etc.
  1042 */
  1043 typedef void* mspace;
  1044 
  1045 /*
  1046   create_mspace creates and returns a new independent space with the
  1047   given initial capacity, or, if 0, the default granularity size.  It
  1048   returns null if there is no system memory available to create the
  1049   space.  If argument locked is non-zero, the space uses a separate
  1050   lock to control access. The capacity of the space will grow
  1051   dynamically as needed to service mspace_malloc requests.  You can
  1052   control the sizes of incremental increases of this space by
  1053   compiling with a different DEFAULT_GRANULARITY or dynamically
  1054   setting with mallopt(M_GRANULARITY, value).
  1055 */
  1056 mspace create_mspace(size_t capacity, int locked);
  1057 
  1058 /*
  1059   destroy_mspace destroys the given space, and attempts to return all
  1060   of its memory back to the system, returning the total number of
  1061   bytes freed. After destruction, the results of access to all memory
  1062   used by the space become undefined.
  1063 */
  1064 size_t destroy_mspace(mspace msp);
  1065 
  1066 /*
  1067   create_mspace_with_base uses the memory supplied as the initial base
  1068   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  1069   space is used for bookkeeping, so the capacity must be at least this
  1070   large. (Otherwise 0 is returned.) When this initial space is
  1071   exhausted, additional memory will be obtained from the system.
  1072   Destroying this space will deallocate all additionally allocated
  1073   space (if possible) but not the initial base.
  1074 */
  1075 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
  1076 
  1077 /*
  1078   mspace_malloc behaves as malloc, but operates within
  1079   the given space.
  1080 */
  1081 void* mspace_malloc(mspace msp, size_t bytes);
  1082 
  1083 /*
  1084   mspace_free behaves as free, but operates within
  1085   the given space.
  1086 
  1087   If compiled with FOOTERS==1, mspace_free is not actually needed.
  1088   free may be called instead of mspace_free because freed chunks from
  1089   any space are handled by their originating spaces.
  1090 */
  1091 void mspace_free(mspace msp, void* mem);
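
/*
  A minimal lifecycle sketch (illustrative): create a private space, allocate
  and free within it, then tear it down and return its memory to the system:

    mspace ms = create_mspace(0, 0);   // default capacity, no locking
    void* p = mspace_malloc(ms, 128);
    mspace_free(ms, p);
    destroy_mspace(ms);                // releases everything in the space
*/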
  1092 
  1093 /*
  1094   mspace_realloc behaves as realloc, but operates within
  1095   the given space.
  1096 
  1097   If compiled with FOOTERS==1, mspace_realloc is not actually
  1098   needed.  realloc may be called instead of mspace_realloc because
  1099   realloced chunks from any space are handled by their originating
  1100   spaces.
  1101 */
  1102 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
  1103 
  1104 /*
  1105   mspace_calloc behaves as calloc, but operates within
  1106   the given space.
  1107 */
  1108 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  1109 
  1110 /*
  1111   mspace_memalign behaves as memalign, but operates within
  1112   the given space.
  1113 */
  1114 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  1115 
  1116 /*
  1117   mspace_independent_calloc behaves as independent_calloc, but
  1118   operates within the given space.
  1119 */
  1120 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  1121                                  size_t elem_size, void* chunks[]);
  1122 
  1123 /*
  1124   mspace_independent_comalloc behaves as independent_comalloc, but
  1125   operates within the given space.
  1126 */
  1127 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  1128                                    size_t sizes[], void* chunks[]);
  1129 
  1130 /*
  1131   mspace_footprint() returns the number of bytes obtained from the
  1132   system for this space.
  1133 */
  1134 size_t mspace_footprint(mspace msp);
  1135 
  1136 /*
  1137   mspace_max_footprint() returns the peak number of bytes obtained from the
  1138   system for this space.
  1139 */
  1140 size_t mspace_max_footprint(mspace msp);
  1141 
  1142 
  1143 #if !NO_MALLINFO
  1144 /*
  1145   mspace_mallinfo behaves as mallinfo, but reports properties of
  1146   the given space.
  1147 */
  1148 struct mallinfo mspace_mallinfo(mspace msp);
  1149 #endif /* NO_MALLINFO */
  1150 
  1151 /*
  1152   mspace_malloc_stats behaves as malloc_stats, but reports
  1153   properties of the given space.
  1154 */
  1155 void mspace_malloc_stats(mspace msp);
  1156 
  1157 /*
  1158   mspace_trim behaves as malloc_trim, but
  1159   operates within the given space.
  1160 */
  1161 int mspace_trim(mspace msp, size_t pad);
  1162 
  1163 /*
  1164   An alias for mallopt.
  1165 */
  1166 int mspace_mallopt(int, int);
  1167 
  1168 #endif /* MSPACES */
  1169 
  1170 #ifdef __cplusplus
  1171 };  /* end of extern "C" */
  1172 #endif /* __cplusplus */
  1173 
  1174 /*
  1175   ========================================================================
  1176   To make a fully customizable malloc.h header file, cut everything
  1177   above this line, put into file malloc.h, edit to suit, and #include it
  1178   on the next line, as well as in programs that use this malloc.
  1179   ========================================================================
  1180 */
  1181 
  1182 /* #include "malloc.h" */
  1183 
  1184 /*------------------------------ internal #includes ---------------------- */
  1185 
  1186 #ifdef _MSC_VER
  1187 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
  1188 #endif /* _MSC_VER */
  1189 
  1190 #ifndef LACKS_STDIO_H
  1191 #include <stdio.h>       /* for printing in malloc_stats */
  1192 #endif
  1193 
  1194 #ifndef LACKS_ERRNO_H
  1195 #include <errno.h>       /* for MALLOC_FAILURE_ACTION */
  1196 #endif /* LACKS_ERRNO_H */
  1197 #if FOOTERS
  1198 #include <time.h>        /* for magic initialization */
  1199 #endif /* FOOTERS */
  1200 #ifndef LACKS_STDLIB_H
  1201 #include <stdlib.h>      /* for abort() */
  1202 #endif /* LACKS_STDLIB_H */
  1203 #ifdef DEBUG
  1204 #if ABORT_ON_ASSERT_FAILURE
  1205 #define assert(x) if(!(x)) ABORT
  1206 #else /* ABORT_ON_ASSERT_FAILURE */
  1207 #include <assert.h>
  1208 #endif /* ABORT_ON_ASSERT_FAILURE */
  1209 #else  /* DEBUG */
  1210 #define assert(x)
  1211 #endif /* DEBUG */
  1212 #ifndef LACKS_STRING_H
  1213 #include <string.h>      /* for memset etc */
  1214 #endif  /* LACKS_STRING_H */
  1215 #if USE_BUILTIN_FFS
  1216 #ifndef LACKS_STRINGS_H
  1217 #include <strings.h>     /* for ffs */
  1218 #endif /* LACKS_STRINGS_H */
  1219 #endif /* USE_BUILTIN_FFS */
  1220 #if HAVE_MMAP
  1221 #ifndef LACKS_SYS_MMAN_H
  1222 #include <sys/mman.h>    /* for mmap */
  1223 #endif /* LACKS_SYS_MMAN_H */
  1224 #ifndef LACKS_FCNTL_H
  1225 #include <fcntl.h>
  1226 #endif /* LACKS_FCNTL_H */
  1227 #endif /* HAVE_MMAP */
  1228 #if HAVE_MORECORE
  1229 #ifndef LACKS_UNISTD_H
  1230 #include <unistd.h>     /* for sbrk */
  1231 #else /* LACKS_UNISTD_H */
  1232 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  1233 extern void*     sbrk(ptrdiff_t);
  1234 #endif /* FreeBSD etc */
  1235 #endif /* LACKS_UNISTD_H */
  1236 #endif /* HAVE_MORECORE */
  1237 
  1238 #ifndef WIN32
  1239 #ifndef malloc_getpagesize
  1240 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
  1241 #    ifndef _SC_PAGE_SIZE
  1242 #      define _SC_PAGE_SIZE _SC_PAGESIZE
  1243 #    endif
  1244 #  endif
  1245 #  ifdef _SC_PAGE_SIZE
  1246 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  1247 #  else
  1248 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  1249        extern size_t getpagesize();
  1250 #      define malloc_getpagesize getpagesize()
  1251 #    else
  1252 #      ifdef WIN32 /* use supplied emulation of getpagesize */
  1253 #        define malloc_getpagesize getpagesize()
  1254 #      else
  1255 #        ifndef LACKS_SYS_PARAM_H
  1256 #          include <sys/param.h>
  1257 #        endif
  1258 #        ifdef EXEC_PAGESIZE
  1259 #          define malloc_getpagesize EXEC_PAGESIZE
  1260 #        else
  1261 #          ifdef NBPG
  1262 #            ifndef CLSIZE
  1263 #              define malloc_getpagesize NBPG
  1264 #            else
  1265 #              define malloc_getpagesize (NBPG * CLSIZE)
  1266 #            endif
  1267 #          else
  1268 #            ifdef NBPC
  1269 #              define malloc_getpagesize NBPC
  1270 #            else
  1271 #              ifdef PAGESIZE
  1272 #                define malloc_getpagesize PAGESIZE
  1273 #              else /* just guess */
  1274 #                define malloc_getpagesize ((size_t)4096U)
  1275 #              endif
  1276 #            endif
  1277 #          endif
  1278 #        endif
  1279 #      endif
  1280 #    endif
  1281 #  endif
  1282 #endif
  1283 #endif
  1284 
  1285 /* ------------------- size_t and alignment properties -------------------- */
  1286 
  1287 /* The byte and bit size of a size_t */
  1288 #define SIZE_T_SIZE         (sizeof(size_t))
  1289 #define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
  1290 
  1291 /* Some constants coerced to size_t */
  1292 /* Annoying but necessary to avoid errors on some platforms */
  1293 #define SIZE_T_ZERO         ((size_t)0)
  1294 #define SIZE_T_ONE          ((size_t)1)
  1295 #define SIZE_T_TWO          ((size_t)2)
  1296 #define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
  1297 #define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
  1298 #define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
  1299 #define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
  1300 
  1301 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
  1302 #define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
  1303 
  1304 /* True if address a has acceptable alignment */
  1305 #define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
  1306 
  1307 /* the number of bytes to offset an address to align it */
  1308 #define align_offset(A)\
  1309  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  1310   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
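
       /*
         Worked example, assuming MALLOC_ALIGNMENT is 8 (so CHUNK_ALIGN_MASK
         is 7): an address ending in 0x4 is 4 bytes short of the next 8-byte
         boundary, so align_offset((void*)0x1004) == 4, while an already
         aligned address gives align_offset((void*)0x1008) == 0.
       */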
  1311 
  1312 /* -------------------------- MMAP preliminaries ------------------------- */
  1313 
  1314 /*
  1315    If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
  1316    checks to fail so compiler optimizer can delete code rather than
  1317    using so many "#if"s.
  1318 */
  1319 
  1320 
  1321 /* MORECORE and MMAP must return MFAIL on failure */
  1322 #define MFAIL                ((void*)(MAX_SIZE_T))
  1323 #define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
  1324 
  1325 #if !HAVE_MMAP
  1326 #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
  1327 #define USE_MMAP_BIT         (SIZE_T_ZERO)
  1328 #define CALL_MMAP(s)         MFAIL
  1329 #define CALL_MUNMAP(a, s)    (-1)
  1330 #define DIRECT_MMAP(s)       MFAIL
  1331 
  1332 #else /* HAVE_MMAP */
  1333 #define IS_MMAPPED_BIT       (SIZE_T_ONE)
  1334 #define USE_MMAP_BIT         (SIZE_T_ONE)
  1335 
  1336 #ifndef WIN32
  1337 #define CALL_MUNMAP(a, s)    munmap((a), (s))
  1338 #define MMAP_PROT            (PROT_READ|PROT_WRITE)
  1339 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  1340 #define MAP_ANONYMOUS        MAP_ANON
  1341 #endif /* MAP_ANON */
  1342 #ifdef MAP_ANONYMOUS
  1343 #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
  1344 #define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
  1345 #else /* MAP_ANONYMOUS */
  1346 /*
  1347    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  1348    is unlikely to be needed, but is supplied just in case.
  1349 */
  1350 #define MMAP_FLAGS           (MAP_PRIVATE)
  1351 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  1352 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  1353            (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1354             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  1355             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
  1356 #endif /* MAP_ANONYMOUS */
  1357 
  1358 #define DIRECT_MMAP(s)       CALL_MMAP(s)
  1359 #else /* WIN32 */
  1360 
  1361 /* Win32 MMAP via VirtualAlloc */
  1362 static void* win32mmap(size_t size) {
  1363   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  1364   return (ptr != 0)? ptr: MFAIL;
  1365 }
  1366 
  1367 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
  1368 static void* win32direct_mmap(size_t size) {
  1369   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
  1370                            PAGE_READWRITE);
  1371   return (ptr != 0)? ptr: MFAIL;
  1372 }
  1373 
  1374 /* This function supports releasing coalesced segments */
  1375 static int win32munmap(void* ptr, size_t size) {
  1376   MEMORY_BASIC_INFORMATION minfo;
  1377   char* cptr = ptr;
  1378   while (size) {
  1379     if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
  1380       return -1;
  1381     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
  1382         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
  1383       return -1;
  1384     if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
  1385       return -1;
  1386     cptr += minfo.RegionSize;
  1387     size -= minfo.RegionSize;
  1388   }
  1389   return 0;
  1390 }
  1391 
  1392 #define CALL_MMAP(s)         win32mmap(s)
  1393 #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
  1394 #define DIRECT_MMAP(s)       win32direct_mmap(s)
  1395 #endif /* WIN32 */
  1396 #endif /* HAVE_MMAP */
  1397 
  1398 #if HAVE_MMAP && HAVE_MREMAP
  1399 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
  1400 #else  /* HAVE_MMAP && HAVE_MREMAP */
  1401 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
  1402 #endif /* HAVE_MMAP && HAVE_MREMAP */
  1403 
  1404 #if HAVE_MORECORE
  1405 #define CALL_MORECORE(S)     MORECORE(S)
  1406 #else  /* HAVE_MORECORE */
  1407 #define CALL_MORECORE(S)     MFAIL
  1408 #endif /* HAVE_MORECORE */
  1409 
  1410 /* mstate bit set if contiguous morecore disabled or failed */
  1411 #define USE_NONCONTIGUOUS_BIT (4U)
  1412 
  1413 /* segment bit set in create_mspace_with_base */
  1414 #define EXTERN_BIT            (8U)
  1415 
  1416 
  1417 /* --------------------------- Lock preliminaries ------------------------ */
  1418 
  1419 #if USE_LOCKS
  1420 
  1421 /*
  1422   When locks are defined, there are up to two global locks:
  1423 
  1424   * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
  1425     MORECORE.  In many cases sys_alloc requires two calls that should
  1426     not be interleaved with calls by other threads.  This does not
  1427     protect against direct calls to MORECORE by other threads not
  1428     using this lock, so there is still code to cope as best we can
  1429     with interference.
  1430 
  1431   * magic_init_mutex ensures that mparams.magic and other
  1432     unique mparams values are initialized only once.
  1433 */
  1434 
  1435 #ifndef WIN32
  1436 /* By default use posix locks */
  1437 #include <pthread.h>
  1438 #define MLOCK_T pthread_mutex_t
  1439 #define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
  1440 #define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
  1441 #define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
  1442 
  1443 #if HAVE_MORECORE
  1444 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
  1445 #endif /* HAVE_MORECORE */
  1446 
  1447 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
  1448 
  1449 #else /* WIN32 */
  1450 /*
  1451    Because lock-protected regions have bounded times, and there
  1452    are no recursive lock calls, we can use simple spinlocks.
  1453 */
  1454 
  1455 #define MLOCK_T long
  1456 static int win32_acquire_lock (MLOCK_T *sl) {
  1457   for (;;) {
  1458 #ifdef InterlockedCompareExchangePointer
  1459     if (!InterlockedCompareExchange(sl, 1, 0))
  1460       return 0;
  1461 #else  /* Use older void* version */
  1462     if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
  1463       return 0;
  1464 #endif /* InterlockedCompareExchangePointer */
  1465     Sleep (0);
  1466   }
  1467 }
  1468 
  1469 static void win32_release_lock (MLOCK_T *sl) {
  1470   InterlockedExchange (sl, 0);
  1471 }
  1472 
  1473 #define INITIAL_LOCK(l)      *(l)=0
  1474 #define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
  1475 #define RELEASE_LOCK(l)      win32_release_lock(l)
  1476 #if HAVE_MORECORE
  1477 static MLOCK_T morecore_mutex;
  1478 #endif /* HAVE_MORECORE */
  1479 static MLOCK_T magic_init_mutex;
  1480 #endif /* WIN32 */
  1481 
  1482 #define USE_LOCK_BIT               (2U)
  1483 #else  /* USE_LOCKS */
  1484 #define USE_LOCK_BIT               (0U)
  1485 #define INITIAL_LOCK(l)
  1486 #endif /* USE_LOCKS */
  1487 
  1488 #if USE_LOCKS && HAVE_MORECORE
  1489 #define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
  1490 #define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
  1491 #else /* USE_LOCKS && HAVE_MORECORE */
  1492 #define ACQUIRE_MORECORE_LOCK()
  1493 #define RELEASE_MORECORE_LOCK()
  1494 #endif /* USE_LOCKS && HAVE_MORECORE */
  1495 
  1496 #if USE_LOCKS
  1497 #define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
  1498 #define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
  1499 #else  /* USE_LOCKS */
  1500 #define ACQUIRE_MAGIC_INIT_LOCK()
  1501 #define RELEASE_MAGIC_INIT_LOCK()
  1502 #endif /* USE_LOCKS */
  1503 
  1504 
  1505 /* -----------------------  Chunk representations ------------------------ */
  1506 
  1507 /*
  1508   (The following includes lightly edited explanations by Colin Plumb.)
  1509 
  1510   The malloc_chunk declaration below is misleading (but accurate and
  1511   necessary).  It declares a "view" into memory allowing access to
  1512   necessary fields at known offsets from a given base.
  1513 
  1514   Chunks of memory are maintained using a `boundary tag' method as
  1515   originally described by Knuth.  (See the paper by Paul Wilson
  1516   ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
  1517   techniques.)  Sizes of free chunks are stored both in the front of
  1518   each chunk and at the end.  This makes consolidating fragmented
  1519   chunks into bigger chunks fast.  The head fields also hold bits
  1520   representing whether chunks are free or in use.
  1521 
  1522   Here are some pictures to make it clearer.  They are "exploded" to
  1523   show that the state of a chunk can be thought of as extending from
  1524   the high 31 bits of the head field of its header through the
  1525   prev_foot and PINUSE_BIT bit of the following chunk header.
  1526 
  1527   A chunk that's in use looks like:
  1528 
  1529    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1530            | Size of previous chunk (if P = 1)                             |
  1531            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1532          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1533          | Size of this chunk                                         1| +-+
  1534    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1535          |                                                               |
  1536          +-                                                             -+
  1537          |                                                               |
  1538          +-                                                             -+
  1539          |                                                               :
  1540          +-      size - sizeof(size_t) available payload bytes          -+
  1541          :                                                               |
  1542  chunk-> +-                                                             -+
  1543          |                                                               |
  1544          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1545        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
  1546        | Size of next chunk (may or may not be in use)               | +-+
  1547  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1548 
  1549     And if it's free, it looks like this:
  1550 
  1551    chunk-> +-                                                             -+
  1552            | User payload (must be in use, or we would have merged!)       |
  1553            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1554          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1555          | Size of this chunk                                         0| +-+
  1556    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1557          | Next pointer                                                  |
  1558          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1559          | Prev pointer                                                  |
  1560          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1561          |                                                               :
  1562          +-      size - sizeof(struct chunk) unused bytes               -+
  1563          :                                                               |
  1564  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1565          | Size of this chunk                                            |
  1566          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1567        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
  1568        | Size of next chunk (must be in use, or we would have merged)| +-+
  1569  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1570        |                                                               :
  1571        +- User payload                                                -+
  1572        :                                                               |
  1573        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1574                                                                      |0|
  1575                                                                      +-+
  1576   Note that since we always merge adjacent free chunks, the chunks
  1577   adjacent to a free chunk must be in use.
  1578 
  1579   Given a pointer to a chunk (which can be derived trivially from the
  1580   payload pointer) we can, in O(1) time, find out whether the adjacent
  1581   chunks are free, and if so, unlink them from the lists that they
  1582   are on and merge them with the current chunk.
  1583 
  1584   Chunks always begin on even word boundaries, so the mem portion
  1585   (which is returned to the user) is also on an even word boundary, and
  1586   thus at least double-word aligned.
  1587 
  1588   The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
  1589   chunk size (which is always a multiple of two words), is an in-use
  1590   bit for the *previous* chunk.  If that bit is *clear*, then the
  1591   word before the current chunk size contains the previous chunk
  1592   size, and can be used to find the front of the previous chunk.
  1593   The very first chunk allocated always has this bit set, preventing
  1594   access to non-existent (or non-owned) memory. If pinuse is set for
  1595   any given chunk, then you CANNOT determine the size of the
  1596   previous chunk, and might even get a memory addressing fault when
  1597   trying to do so.
  1598 
  1599   The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
  1600   the chunk size redundantly records whether the current chunk is
  1601   inuse. This redundancy enables usage checks within free and realloc,
  1602   and reduces indirection when freeing and consolidating chunks.
  1603 
  1604   Each freshly allocated chunk must have both cinuse and pinuse set.
  1605   That is, each allocated chunk borders either a previously allocated
  1606   and still in-use chunk, or the base of its memory arena. This is
  1607   ensured by making all allocations from the `lowest' part of any
  1608   found chunk.  Further, no free chunk physically borders another one,
  1609   so each free chunk is known to be preceded and followed by either
  1610   inuse chunks or the ends of memory.
  1611 
  1612   Note that the `foot' of the current chunk is actually represented
  1613   as the prev_foot of the NEXT chunk. This makes it easier to
  1614   deal with alignments etc but can be very confusing when trying
  1615   to extend or adapt this code.
  1616 
  1617   The exceptions to all this are
  1618 
  1619      1. The special chunk `top' is the top-most available chunk (i.e.,
  1620         the one bordering the end of available memory). It is treated
  1621         specially.  Top is never included in any bin, is used only if
  1622         no other chunk is available, and is released back to the
  1623         system if it is very large (see M_TRIM_THRESHOLD).  In effect,
  1624         the top chunk is treated as larger (and thus less well
  1625         fitting) than any other available chunk.  The top chunk
  1626         doesn't update its trailing size field since there is no next
  1627         contiguous chunk that would have to index off it. However,
  1628         space is still allocated for it (TOP_FOOT_SIZE) to enable
  1629         separation or merging when space is extended.
  1630 
  1631      2. Chunks allocated via mmap, which have the lowest-order bit
  1632         (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
  1633         PINUSE_BIT in their head fields.  Because they are allocated
  1634         one-by-one, each must carry its own prev_foot field, which is
  1635         also used to hold the offset this chunk has within its mmapped
  1636         region, which is needed to preserve alignment. Each mmapped
  1637         chunk is trailed by the first two fields of a fake next-chunk
  1638         for the sake of usage checks.
  1639 
  1640 */
  1641 
  1642 struct malloc_chunk {
  1643   size_t               prev_foot;  /* Size of previous chunk (if free).  */
  1644   size_t               head;       /* Size and inuse bits. */
  1645   struct malloc_chunk* fd;         /* double links -- used only if free. */
  1646   struct malloc_chunk* bk;
  1647 };
  1648 
  1649 typedef struct malloc_chunk  mchunk;
  1650 typedef struct malloc_chunk* mchunkptr;
  1651 typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
  1652 typedef size_t bindex_t;               /* Described below */
  1653 typedef unsigned int binmap_t;         /* Described below */
  1654 typedef unsigned int flag_t;           /* The type of various bit flag sets */
  1655 
  1656 /* ------------------- Chunks sizes and alignments ----------------------- */
  1657 
  1658 #define MCHUNK_SIZE         (sizeof(mchunk))
  1659 
  1660 #if FOOTERS
  1661 #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
  1662 #else /* FOOTERS */
  1663 #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
  1664 #endif /* FOOTERS */
  1665 
  1666 /* MMapped chunks need a second word of overhead ... */
  1667 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
  1668 /* ... and additional padding for fake next-chunk at foot */
  1669 #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
  1670 
  1671 /* The smallest size we can malloc is an aligned minimal chunk */
  1672 #define MIN_CHUNK_SIZE\
  1673   ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1674 
  1675 /* conversion from malloc headers to user pointers, and back */
  1676 #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
  1677 #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
  1678 /* chunk associated with aligned address A */
  1679 #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
  1680 
  1681 /* Bounds on request (not chunk) sizes. */
  1682 #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
  1683 #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
  1684 
  1685 /* pad request bytes into a usable size */
  1686 #define pad_request(req) \
  1687    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1688 
  1689 /* pad request, checking for minimum (but not maximum) */
  1690 #define request2size(req) \
  1691   (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
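
       /*
         Worked example, assuming a 32-bit size_t, 8-byte alignment and no
         FOOTERS (so CHUNK_OVERHEAD == 4 and CHUNK_ALIGN_MASK == 7):
           MIN_CHUNK_SIZE   == 16
           MIN_REQUEST      == 11
           request2size(1)  == 16    (below MIN_REQUEST, so a minimal chunk)
           request2size(13) == 24    ((13 + 4 + 7) & ~7)
         The padded size leaves room for the chunk header while keeping
         every chunk a multiple of the alignment.
       */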
  1692 
  1693 
  1694 /* ------------------ Operations on head and foot fields ----------------- */
  1695 
  1696 /*
  1697   The head field of a chunk is or'ed with PINUSE_BIT when the previous
  1698   adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
  1699   use. If the chunk was obtained with mmap, the prev_foot field has
  1700   IS_MMAPPED_BIT set, with its remaining bits holding the offset from the
  1701   base of the mmapped region to the base of the chunk.
  1702 */
  1703 
  1704 #define PINUSE_BIT          (SIZE_T_ONE)
  1705 #define CINUSE_BIT          (SIZE_T_TWO)
  1706 #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
  1707 
  1708 /* Head value for fenceposts */
  1709 #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
  1710 
  1711 /* extraction of fields from head words */
  1712 #define cinuse(p)           ((p)->head & CINUSE_BIT)
  1713 #define pinuse(p)           ((p)->head & PINUSE_BIT)
  1714 #define chunksize(p)        ((p)->head & ~(INUSE_BITS))
  1715 
  1716 #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
  1717 #define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
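
       /*
         Worked example: an in-use chunk of 48 bytes whose previous neighbor
         is also in use stores head == 48|CINUSE_BIT|PINUSE_BIT == 0x33, so
         chunksize(p) recovers 48 while cinuse(p) and pinuse(p) are both
         nonzero.  (Chunk sizes are always multiples of the alignment, so
         the two low bits are free to carry these flags.)
       */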
  1718 
  1719 /* Treat space at ptr +/- offset as a chunk */
  1720 #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
  1721 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
  1722 
  1723 /* Ptr to next or previous physical malloc_chunk. */
  1724 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
  1725 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
  1726 
  1727 /* extract next chunk's pinuse bit */
  1728 #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
  1729 
  1730 /* Get/set size at footer */
  1731 #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
  1732 #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
  1733 
  1734 /* Set size, pinuse bit, and foot */
  1735 #define set_size_and_pinuse_of_free_chunk(p, s)\
  1736   ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
  1737 
  1738 /* Set size, pinuse bit, foot, and clear next pinuse */
  1739 #define set_free_with_pinuse(p, s, n)\
  1740   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
  1741 
  1742 #define is_mmapped(p)\
  1743   (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
  1744 
  1745 /* Get the internal overhead associated with chunk p */
  1746 #define overhead_for(p)\
  1747  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
  1748 
  1749 /* Return true if malloced space is not necessarily cleared */
  1750 #if MMAP_CLEARS
  1751 #define calloc_must_clear(p) (!is_mmapped(p))
  1752 #else /* MMAP_CLEARS */
  1753 #define calloc_must_clear(p) (1)
  1754 #endif /* MMAP_CLEARS */
  1755 
  1756 /* ---------------------- Overlaid data structures ----------------------- */
  1757 
  1758 /*
  1759   When chunks are not in use, they are treated as nodes of either
  1760   lists or trees.
  1761 
  1762   "Small"  chunks are stored in circular doubly-linked lists, and look
  1763   like this:
  1764 
  1765     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1766             |             Size of previous chunk                            |
  1767             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1768     `head:' |             Size of chunk, in bytes                         |P|
  1769       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1770             |             Forward pointer to next chunk in list             |
  1771             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1772             |             Back pointer to previous chunk in list            |
  1773             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1774             |             Unused space (may be 0 bytes long)                .
  1775             .                                                               .
  1776             .                                                               |
  1777 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1778     `foot:' |             Size of chunk, in bytes                           |
  1779             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1780 
  1781   Larger chunks are kept in a form of bitwise digital trees (aka
  1782   tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
  1783   free chunks greater than 256 bytes, their size doesn't impose any
  1784   constraints on user chunk sizes.  Each node looks like:
  1785 
  1786     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1787             |             Size of previous chunk                            |
  1788             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1789     `head:' |             Size of chunk, in bytes                         |P|
  1790       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1791             |             Forward pointer to next chunk of same size        |
  1792             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1793             |             Back pointer to previous chunk of same size       |
  1794             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1795             |             Pointer to left child (child[0])                  |
  1796             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1797             |             Pointer to right child (child[1])                 |
  1798             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1799             |             Pointer to parent                                 |
  1800             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1801             |             bin index of this chunk                           |
  1802             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1803             |             Unused space                                      .
  1804             .                                                               |
  1805 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1806     `foot:' |             Size of chunk, in bytes                           |
  1807             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1808 
  1809   Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
  1810   of the same size are arranged in a circularly-linked list, with only
  1811   the oldest chunk (the next to be used, in our FIFO ordering)
  1812   actually in the tree.  (Tree members are distinguished by a non-null
  1813   parent pointer.)  If a chunk with the same size as an existing node
  1814   is inserted, it is linked off the existing node using pointers that
  1815   work in the same way as fd/bk pointers of small chunks.
  1816 
  1817   Each tree contains a power of 2 sized range of chunk sizes (the
  1818   smallest is 0x100 <= x < 0x180), which is divided in half at each
  1819   tree level, with the chunks in the smaller half of the range (0x100
  1820   <= x < 0x140 for the top node) in the left subtree and the larger
  1821   half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
  1822   done by inspecting individual bits.
  1823 
  1824   Using these rules, each node's left subtree contains all smaller
  1825   sizes than its right subtree.  However, the node at the root of each
  1826   subtree has no particular ordering relationship to either.  (The
  1827   dividing line between the subtree sizes is based on trie relation.)
  1828   If we remove the last chunk of a given size from the interior of the
  1829   tree, we need to replace it with a leaf node.  The tree ordering
  1830   rules permit a node to be replaced by any leaf below it.
  1831 
  1832   The smallest chunk in a tree (a common operation in a best-fit
  1833   allocator) can be found by walking a path to the leftmost leaf in
  1834   the tree.  Unlike a usual binary tree, where we follow left child
  1835   pointers until we reach a null, here we follow the right child
  1836   pointer any time the left one is null, until we reach a leaf with
  1837   both child pointers null. The smallest chunk in the tree will be
  1838   somewhere along that path.
  1839 
  1840   The worst case number of steps to add, find, or remove a node is
  1841   bounded by the number of bits differentiating chunks within
  1842   bins. Under current bin calculations, this ranges from 6 up to 21
  1843   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  1844   is of course much better.
  1845 */
  1846 
  1847 struct malloc_tree_chunk {
  1848   /* The first four fields must be compatible with malloc_chunk */
  1849   size_t                    prev_foot;
  1850   size_t                    head;
  1851   struct malloc_tree_chunk* fd;
  1852   struct malloc_tree_chunk* bk;
  1853 
  1854   struct malloc_tree_chunk* child[2];
  1855   struct malloc_tree_chunk* parent;
  1856   bindex_t                  index;
  1857 };
  1858 
  1859 typedef struct malloc_tree_chunk  tchunk;
  1860 typedef struct malloc_tree_chunk* tchunkptr;
  1861 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
  1862 
  1863 /* A little helper macro for trees */
  1864 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
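
       /*
         A sketch of the "leftmost path" walk described in the comments
         above (illustrative only; the real search in this file also tracks
         the remainder left over after subtracting the requested size):

           tchunkptr t = root;              // root of a non-empty treebin
           tchunkptr least = t;
           while ((t = leftmost_child(t)) != 0) {
             if (chunksize(t) < chunksize(least))
               least = t;
           }
           // `least' now points to the smallest chunk held in this tree.
       */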
  1865 
  1866 /* ----------------------------- Segments -------------------------------- */
  1867 
  1868 /*
  1869   Each malloc space may include non-contiguous segments, held in a
  1870   list headed by an embedded malloc_segment record representing the
  1871   top-most space. Segments also include flags holding properties of
  1872   the space. Large chunks that are directly allocated by mmap are not
  1873   included in this list. They are instead independently created and
  1874   destroyed without otherwise keeping track of them.
  1875 
  1876   Segment management mainly comes into play for spaces allocated by
  1877   MMAP.  Any call to MMAP might or might not return memory that is
  1878   adjacent to an existing segment.  MORECORE normally contiguously
  1879   extends the current space, so this space is almost always adjacent,
  1880   which is simpler and faster to deal with. (This is why MORECORE is
  1881   used preferentially to MMAP when both are available -- see
  1882   sys_alloc.)  When allocating using MMAP, we don't use any of the
  1883   hinting mechanisms (inconsistently) supported in various
  1884   implementations of unix mmap, or distinguish reserving from
  1885   committing memory. Instead, we just ask for space, and exploit
  1886   contiguity when we get it.  It is probably possible to do
  1887   better than this on some systems, but no general scheme seems
  1888   to be significantly better.
  1889 
  1890   Management entails a simpler variant of the consolidation scheme
  1891   used for chunks to reduce fragmentation -- new adjacent memory is
  1892   normally prepended or appended to an existing segment. However,
  1893   there are limitations compared to chunk consolidation that mostly
  1894   reflect the fact that segment processing is relatively infrequent
  1895   (occurring only when getting memory from system) and that we
  1896   don't expect to have huge numbers of segments:
  1897 
  1898   * Segments are not indexed, so traversal requires linear scans.  (It
  1899     would be possible to index these, but is not worth the extra
  1900     overhead and complexity for most programs on most platforms.)
  1901   * New segments are only appended to old ones when holding top-most
  1902     memory; if they cannot be prepended to others, they are held in
  1903     different segments.
  1904 
  1905   Except for the top-most segment of an mstate, each segment record
  1906   is kept at the tail of its segment. Segments are added by pushing
  1907   segment records onto the list headed by &mstate.seg for the
  1908   containing mstate.
  1909 
  1910   Segment flags control allocation/merge/deallocation policies:
  1911   * If EXTERN_BIT set, then we did not allocate this segment,
  1912     and so should not try to deallocate or merge with others.
  1913     (This currently holds only for the initial segment passed
  1914     into create_mspace_with_base.)
  1915   * If IS_MMAPPED_BIT set, the segment may be merged with
  1916     other surrounding mmapped segments and trimmed/de-allocated
  1917     using munmap.
  1918   * If neither bit is set, then the segment was obtained using
  1919     MORECORE so can be merged with surrounding MORECORE'd segments
  1920     and deallocated/trimmed using MORECORE with negative arguments.
  1921 */
  1922 
  1923 struct malloc_segment {
  1924   char*        base;             /* base address */
  1925   size_t       size;             /* allocated size */
  1926   struct malloc_segment* next;   /* ptr to next segment */
  1927   flag_t       sflags;           /* mmap and extern flag */
  1928 };
  1929 
  1930 #define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
  1931 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
  1932 
  1933 typedef struct malloc_segment  msegment;
  1934 typedef struct malloc_segment* msegmentptr;
  1935 
  1936 /* ---------------------------- malloc_state ----------------------------- */
  1937 
  1938 /*
  1939    A malloc_state holds all of the bookkeeping for a space.
  1940    The main fields are:
  1941 
  1942   Top
  1943     The topmost chunk of the currently active segment. Its size is
  1944     cached in topsize.  The actual size of topmost space is
  1945     topsize+TOP_FOOT_SIZE, which includes space reserved for adding
  1946     fenceposts and segment records if necessary when getting more
  1947     space from the system.  The size at which to autotrim top is
  1948     cached from mparams in trim_check, except that it is disabled if
  1949     an autotrim fails.
  1950 
  1951   Designated victim (dv)
  1952     This is the preferred chunk for servicing small requests that
  1953     don't have exact fits.  It is normally the chunk split off most
  1954     recently to service another small request.  Its size is cached in
  1955     dvsize. The link fields of this chunk are not maintained since it
  1956     is not kept in a bin.
  1957 
  1958   SmallBins
  1959     An array of bin headers for free chunks.  These bins hold chunks
  1960     with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
  1961     chunks of all the same size, spaced 8 bytes apart.  To simplify
  1962     use in double-linked lists, each bin header acts as a malloc_chunk
  1963     pointing to the real first node, if it exists (else pointing to
  1964     itself).  This avoids special-casing for headers.  But to avoid
  1965     waste, we allocate only the fd/bk pointers of bins, and then use
  1966     repositioning tricks to treat these as the fields of a chunk.
  1967 
  1968   TreeBins
  1969     Treebins are pointers to the roots of trees holding a range of
  1970     sizes. There are 2 equally spaced treebins for each power of two
  1971     from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
  1972     larger.
  1973 
  1974   Bin maps
  1975     There is one bit map for small bins ("smallmap") and one for
  1976     treebins ("treemap").  Each bin sets its bit when non-empty, and
  1977     clears the bit when empty.  Bit operations are then used to avoid
  1978     bin-by-bin searching -- nearly all "search" is done without ever
  1979     looking at bins that won't be selected.  The bit maps
  1980     conservatively use 32 bits per map word, even on a 64-bit system.
  1981     For a good description of some of the bit-based techniques used
  1982     here, see Henry S. Warren Jr's book "Hacker's Delight" (and
  1983     supplement at http://hackersdelight.org/). Many of these are
  1984     intended to reduce the branchiness of paths through malloc etc, as
  1985     well as to reduce the number of memory locations read or written.
  1986 
  1987   Segments
  1988     A list of segments headed by an embedded malloc_segment record
  1989     representing the initial space.
  1990 
  1991   Address check support
  1992     The least_addr field is the least address ever obtained from
  1993     MORECORE or MMAP. Attempted frees and reallocs of any address less
  1994     than this are trapped (unless INSECURE is defined).
  1995 
  1996   Magic tag
  1997     A cross-check field that should always hold the same value as mparams.magic.
  1998 
  1999   Flags
  2000     Bits recording whether to use MMAP, locks, or contiguous MORECORE
  2001 
  2002   Statistics
  2003     Each space keeps track of current and maximum system memory
  2004     obtained via MORECORE or MMAP.
  2005 
  2006   Locking
  2007     If USE_LOCKS is defined, the "mutex" lock is acquired and released
  2008     around every public call using this mspace.
  2009 */
  2010 
  2011 /* Bin types, widths and sizes */
  2012 #define NSMALLBINS        (32U)
  2013 #define NTREEBINS         (32U)
  2014 #define SMALLBIN_SHIFT    (3U)
  2015 #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
  2016 #define TREEBIN_SHIFT     (8U)
  2017 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
  2018 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
  2019 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
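
       /*
         Concrete values, assuming 8-byte alignment and no FOOTERS on a
         32-bit build:
           SMALLBIN_WIDTH    == 8
           MIN_LARGE_SIZE    == 256   (smallest size handled by the treebins)
           MAX_SMALL_SIZE    == 255
           MAX_SMALL_REQUEST == 244   (255 - 7 - 4)
       */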
  2020 
  2021 struct malloc_state {
  2022   binmap_t   smallmap;
  2023   binmap_t   treemap;
  2024   size_t     dvsize;
  2025   size_t     topsize;
  2026   char*      least_addr;
  2027   mchunkptr  dv;
  2028   mchunkptr  top;
  2029   size_t     trim_check;
  2030   size_t     magic;
  2031   mchunkptr  smallbins[(NSMALLBINS+1)*2];
  2032   tbinptr    treebins[NTREEBINS];
  2033   size_t     footprint;
  2034   size_t     max_footprint;
  2035   flag_t     mflags;
  2036 #if USE_LOCKS
  2037   MLOCK_T    mutex;     /* locate lock among fields that rarely change */
  2038 #endif /* USE_LOCKS */
  2039   msegment   seg;
  2040 };
  2041 
  2042 typedef struct malloc_state*    mstate;
  2043 
  2044 /* ------------- Global malloc_state and malloc_params ------------------- */
  2045 
  2046 /*
  2047   malloc_params holds global properties, including those that can be
  2048   dynamically set using mallopt. There is a single instance, mparams,
  2049   initialized in init_mparams.
  2050 */
  2051 
  2052 struct malloc_params {
  2053   size_t magic;
  2054   size_t page_size;
  2055   size_t granularity;
  2056   size_t mmap_threshold;
  2057   size_t trim_threshold;
  2058   flag_t default_mflags;
  2059 };
  2060 
  2061 static struct malloc_params mparams;
  2062 
  2063 /* The global malloc_state used for all non-"mspace" calls */
  2064 static struct malloc_state _gm_;
  2065 #define gm                 (&_gm_)
  2066 #define is_global(M)       ((M) == &_gm_)
  2067 #define is_initialized(M)  ((M)->top != 0)
  2068 
  2069 /* -------------------------- system alloc setup ------------------------- */
  2070 
  2071 /* Operations on mflags */
  2072 
  2073 #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
  2074 #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
  2075 #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
  2076 
  2077 #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
  2078 #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
  2079 #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
  2080 
  2081 #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
  2082 #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
  2083 
  2084 #define set_lock(M,L)\
  2085  ((M)->mflags = (L)?\
  2086   ((M)->mflags | USE_LOCK_BIT) :\
  2087   ((M)->mflags & ~USE_LOCK_BIT))
  2088 
  2089 /* page-align a size */
  2090 #define page_align(S)\
  2091  (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
  2092 
  2093 /* granularity-align a size */
  2094 #define granularity_align(S)\
  2095   (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
  2096 
  2097 #define is_page_aligned(S)\
  2098    (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
  2099 #define is_granularity_aligned(S)\
  2100    (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
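
       /*
         Worked example, assuming mparams.page_size == 4096:
           page_align(1)    == 4096
           page_align(5000) == 8192
         so requests forwarded to MORECORE/MMAP are made in whole pages.
       */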
  2101 
  2102 /*  True if segment S holds address A */
  2103 #define segment_holds(S, A)\
  2104   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
  2105 
  2106 /* Return segment holding given address */
  2107 static msegmentptr segment_holding(mstate m, char* addr) {
  2108   msegmentptr sp = &m->seg;
  2109   for (;;) {
  2110     if (addr >= sp->base && addr < sp->base + sp->size)
  2111       return sp;
  2112     if ((sp = sp->next) == 0)
  2113       return 0;
  2114   }
  2115 }
  2116 
  2117 /* Return true if segment contains a segment link */
  2118 static int has_segment_link(mstate m, msegmentptr ss) {
  2119   msegmentptr sp = &m->seg;
  2120   for (;;) {
  2121     if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
  2122       return 1;
  2123     if ((sp = sp->next) == 0)
  2124       return 0;
  2125   }
  2126 }
  2127 
  2128 #ifndef MORECORE_CANNOT_TRIM
  2129 #define should_trim(M,s)  ((s) > (M)->trim_check)
  2130 #else  /* MORECORE_CANNOT_TRIM */
  2131 #define should_trim(M,s)  (0)
  2132 #endif /* MORECORE_CANNOT_TRIM */
  2133 
  2134 /*
  2135   TOP_FOOT_SIZE is padding at the end of a segment, including space
  2136   that may be needed to place segment records and fenceposts when new
  2137   noncontiguous segments are added.
  2138 */
  2139 #define TOP_FOOT_SIZE\
  2140   (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
  2141 
  2142 
  2143 /* -------------------------------  Hooks -------------------------------- */
  2144 
  2145 /*
  2146   PREACTION should be defined to return 0 on success, and nonzero on
  2147   failure. If you are not using locking, you can redefine these to do
  2148   anything you like.
  2149 */
  2150 
  2151 #if USE_LOCKS
  2152 
  2153 /* Ensure locks are initialized */
  2154 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
  2155 
  2156 #define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
  2157 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
  2158 #else /* USE_LOCKS */
  2159 
  2160 #ifndef PREACTION
  2161 #define PREACTION(M) (0)
  2162 #endif  /* PREACTION */
  2163 
  2164 #ifndef POSTACTION
  2165 #define POSTACTION(M)
  2166 #endif  /* POSTACTION */
  2167 
  2168 #endif /* USE_LOCKS */
  2169 
  2170 /*
  2171   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  2172   USAGE_ERROR_ACTION is triggered on detected bad frees and
  2173   reallocs. The argument p is an address that might have triggered the
  2174   fault. It is ignored by the two predefined actions, but might be
  2175   useful in custom actions that try to help diagnose errors.
  2176 */
  2177 
  2178 #if PROCEED_ON_ERROR
  2179 
  2180 /* A count of the number of corruption errors causing resets */
  2181 int malloc_corruption_error_count;
  2182 
  2183 /* default corruption action */
  2184 static void reset_on_error(mstate m);
  2185 
  2186 #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
  2187 #define USAGE_ERROR_ACTION(m, p)
  2188 
  2189 #else /* PROCEED_ON_ERROR */
  2190 
  2191 #ifndef CORRUPTION_ERROR_ACTION
  2192 #define CORRUPTION_ERROR_ACTION(m) ABORT
  2193 #endif /* CORRUPTION_ERROR_ACTION */
  2194 
  2195 #ifndef USAGE_ERROR_ACTION
  2196 #define USAGE_ERROR_ACTION(m,p) ABORT
  2197 #endif /* USAGE_ERROR_ACTION */
  2198 
  2199 #endif /* PROCEED_ON_ERROR */
  2200 
  2201 /* -------------------------- Debugging setup ---------------------------- */
  2202 
  2203 #if ! DEBUG
  2204 
  2205 #define check_free_chunk(M,P)
  2206 #define check_inuse_chunk(M,P)
  2207 #define check_malloced_chunk(M,P,N)
  2208 #define check_mmapped_chunk(M,P)
  2209 #define check_malloc_state(M)
  2210 #define check_top_chunk(M,P)
  2211 
  2212 #else /* DEBUG */
  2213 #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
  2214 #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
  2215 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
  2216 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
  2217 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
  2218 #define check_malloc_state(M)       do_check_malloc_state(M)
  2219 
  2220 static void   do_check_any_chunk(mstate m, mchunkptr p);
  2221 static void   do_check_top_chunk(mstate m, mchunkptr p);
  2222 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
  2223 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
  2224 static void   do_check_free_chunk(mstate m, mchunkptr p);
  2225 static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
  2226 static void   do_check_tree(mstate m, tchunkptr t);
  2227 static void   do_check_treebin(mstate m, bindex_t i);
  2228 static void   do_check_smallbin(mstate m, bindex_t i);
  2229 static void   do_check_malloc_state(mstate m);
  2230 static int    bin_find(mstate m, mchunkptr x);
  2231 static size_t traverse_and_check(mstate m);
  2232 #endif /* DEBUG */
  2233 
  2234 /* ---------------------------- Indexing Bins ---------------------------- */
  2235 
  2236 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
  2237 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
  2238 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
  2239 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
  2240 
  2241 /* addressing by index. See above about smallbin repositioning */
  2242 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
  2243 #define treebin_at(M,i)     (&((M)->treebins[i]))
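
       /*
         Worked example: with SMALLBIN_SHIFT == 3, a 40-byte chunk maps to
         small_index(40) == 5 and small_index2size(5) == 40; is_small(248)
         is true while is_small(256) is false, so chunks of 256 bytes and
         up are kept in the treebins instead.
       */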
  2244 
  2245 /* assign tree index for size S to variable I */
  2246 #if defined(__GNUC__) && defined(i386)
  2247 #define compute_tree_index(S, I)\
  2248 {\
  2249   size_t X = S >> TREEBIN_SHIFT;\
  2250   if (X == 0)\
  2251     I = 0;\
  2252   else if (X > 0xFFFF)\
  2253     I = NTREEBINS-1;\
  2254   else {\
  2255     unsigned int K;\
  2256     __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
  2257     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  2258   }\
  2259 }
  2260 #else /* GNUC */
  2261 #define compute_tree_index(S, I)\
  2262 {\
  2263   size_t X = S >> TREEBIN_SHIFT;\
  2264   if (X == 0)\
  2265     I = 0;\
  2266   else if (X > 0xFFFF)\
  2267     I = NTREEBINS-1;\
  2268   else {\
  2269     unsigned int Y = (unsigned int)X;\
  2270     unsigned int N = ((Y - 0x100) >> 16) & 8;\
  2271     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
  2272     N += K;\
  2273     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
  2274     K = 14 - N + ((Y <<= K) >> 15);\
  2275     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  2276   }\
  2277 }
  2278 #endif /* GNUC */
  2279 
  2280 /* Bit representing maximum resolved size in a treebin at i */
  2281 #define bit_for_tree_index(i) \
  2282    (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
  2283 
  2284 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
  2285 #define leftshift_for_tree_index(i) \
  2286    ((i == NTREEBINS-1)? 0 : \
  2287     ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
  2288 
  2289 /* The size of the smallest chunk held in bin with index i */
  2290 #define minsize_for_tree_index(i) \
  2291    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
  2292    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
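
       /*
         Worked example (TREEBIN_SHIFT == 8): for S == 768 (0x300),
         X == S >> 8 == 3, whose highest set bit is bit 1, so
         compute_tree_index yields I == (1 << 1) + 1 == 3.  Consistently,
         minsize_for_tree_index(3) == (1 << 9) | (1 << 8) == 768, making
         768 the smallest size held in treebin 3.
       */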
  2293 
  2294 
  2295 /* ------------------------ Operations on bin maps ----------------------- */
  2296 
  2297 /* bit corresponding to given index */
  2298 #define idx2bit(i)              ((binmap_t)(1) << (i))
  2299 
  2300 /* Mark/Clear bits with given index */
  2301 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
  2302 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
  2303 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
  2304 
  2305 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
  2306 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
  2307 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
  2308 
  2309 /* index corresponding to given bit */
  2310 
  2311 #if defined(__GNUC__) && defined(i386)
  2312 #define compute_bit2idx(X, I)\
  2313 {\
  2314   unsigned int J;\
  2315   __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  2316   I = (bindex_t)J;\
  2317 }
  2318 
  2319 #else /* GNUC */
  2320 #if  USE_BUILTIN_FFS
  2321 #define compute_bit2idx(X, I) I = ffs(X)-1
  2322 
  2323 #else /* USE_BUILTIN_FFS */
  2324 #define compute_bit2idx(X, I)\
  2325 {\
  2326   unsigned int Y = X - 1;\
  2327   unsigned int K = Y >> (16-4) & 16;\
  2328   unsigned int N = K;        Y >>= K;\
  2329   N += K = Y >> (8-3) &  8;  Y >>= K;\
  2330   N += K = Y >> (4-2) &  4;  Y >>= K;\
  2331   N += K = Y >> (2-1) &  2;  Y >>= K;\
  2332   N += K = Y >> (1-0) &  1;  Y >>= K;\
  2333   I = (bindex_t)(N + Y);\
  2334 }
  2335 #endif /* USE_BUILTIN_FFS */
  2336 #endif /* GNUC */
  2337 
  2338 /* isolate the least set bit of a bitmap */
  2339 #define least_bit(x)         ((x) & -(x))
  2340 
  2341 /* mask with all bits to left of least bit of x on */
  2342 #define left_bits(x)         ((x<<1) | -(x<<1))
  2343 
  2344 /* mask with all bits to left of or equal to least bit of x on */
  2345 #define same_or_left_bits(x) ((x) | -(x))
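
       /*
         Worked example: for a bin map X == 0x28 (bits 3 and 5 set),
         least_bit(X) == 0x08 and compute_bit2idx(0x08, I) sets I == 3,
         locating the first non-empty bin without scanning bin by bin;
         left_bits(0x08) == 0xFFFFFFF0 masks in all strictly higher bins,
         while same_or_left_bits(0x08) == 0xFFFFFFF8 includes bin 3 too.
       */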
  2346 
  2347 
  2348 /* ----------------------- Runtime Check Support ------------------------- */
  2349 
  2350 /*
  2351   For security, the main invariant is that malloc/free/etc never
  2352   writes to a static address other than malloc_state, unless static
  2353   malloc_state itself has been corrupted, which cannot occur via
  2354   malloc (because of these checks). In essence this means that we
  2355   believe all pointers, sizes, maps etc held in malloc_state, but
  2356   check all of those linked or offsetted from other embedded data
  2357   structures.  These checks are interspersed with main code in a way
  2358   that tends to minimize their run-time cost.
  2359 
  2360   When FOOTERS is defined, in addition to range checking, we also
  2361   verify footer fields of inuse chunks, which can be used to guarantee
  2362   that the mstate controlling malloc/free is intact.  This is a
  2363   streamlined version of the approach described by William Robertson
  2364   et al in "Run-time Detection of Heap-based Overflows" LISA'03
  2365   http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  2366   of an inuse chunk holds the xor of its mstate and a random seed,
  2367   which is checked upon calls to free() and realloc().  This is
  2368   (probabilistically) unguessable from outside the program, but can be
  2369   computed by any code successfully malloc'ing any chunk, so does not
  2370   itself provide protection against code that has already broken
  2371   security through some other means.  Unlike Robertson et al, we
  2372   always dynamically check addresses of all offset chunks (previous,
  2373   next, etc). This turns out to be cheaper than relying on hashes.
  2374 */
  2375 
  2376 #if !INSECURE
  2377 /* Check if address a is at least as high as any from MORECORE or MMAP */
  2378 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
  2379 /* Check if address of next chunk n is higher than base chunk p */
  2380 #define ok_next(p, n)    ((char*)(p) < (char*)(n))
  2381 /* Check if p has its cinuse bit on */
  2382 #define ok_cinuse(p)     cinuse(p)
  2383 /* Check if p has its pinuse bit on */
  2384 #define ok_pinuse(p)     pinuse(p)
  2385 
  2386 #else /* !INSECURE */
  2387 #define ok_address(M, a) (1)
  2388 #define ok_next(b, n)    (1)
  2389 #define ok_cinuse(p)     (1)
  2390 #define ok_pinuse(p)     (1)
  2391 #endif /* !INSECURE */
  2392 
  2393 #if (FOOTERS && !INSECURE)
  2394 /* Check if (alleged) mstate m has expected magic field */
  2395 #define ok_magic(M)      ((M)->magic == mparams.magic)
  2396 #else  /* (FOOTERS && !INSECURE) */
  2397 #define ok_magic(M)      (1)
  2398 #endif /* (FOOTERS && !INSECURE) */
  2399 
  2400 
  2401 /* In gcc, use __builtin_expect to minimize impact of checks */
  2402 #if !INSECURE
  2403 #if defined(__GNUC__) && __GNUC__ >= 3
  2404 #define RTCHECK(e)  __builtin_expect(e, 1)
  2405 #else /* GNUC */
  2406 #define RTCHECK(e)  (e)
  2407 #endif /* GNUC */
  2408 #else /* !INSECURE */
  2409 #define RTCHECK(e)  (1)
  2410 #endif /* !INSECURE */
  2411 
  2412 /* macros to set up inuse chunks with or without footers */
  2413 
  2414 #if !FOOTERS
  2415 
  2416 #define mark_inuse_foot(M,p,s)
  2417 
  2418 /* Set cinuse bit and pinuse bit of next chunk */
  2419 #define set_inuse(M,p,s)\
  2420   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2421   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2422 
  2423 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
  2424 #define set_inuse_and_pinuse(M,p,s)\
  2425   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2426   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2427 
  2428 /* Set size, cinuse and pinuse bit of this chunk */
  2429 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2430   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
  2431 
  2432 #else /* FOOTERS */
  2433 
  2434 /* Set foot of inuse chunk to be xor of mstate and seed */
  2435 #define mark_inuse_foot(M,p,s)\
  2436   (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
  2437 
  2438 #define get_mstate_for(p)\
  2439   ((mstate)(((mchunkptr)((char*)(p) +\
  2440     (chunksize(p))))->prev_foot ^ mparams.magic))
  2441 
  2442 #define set_inuse(M,p,s)\
  2443   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2444   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  2445   mark_inuse_foot(M,p,s))
  2446 
  2447 #define set_inuse_and_pinuse(M,p,s)\
  2448   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2449   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  2450   mark_inuse_foot(M,p,s))
  2451 
  2452 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2453   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2454   mark_inuse_foot(M, p, s))
  2455 
  2456 #endif /* !FOOTERS */
  2457 
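/*
  Illustrative sketch (not part of the allocator, never compiled): how the
  FOOTERS round trip works.  The footer written by mark_inuse_foot is the
  owning mstate XORed with the random magic, so get_mstate_for recovers that
  mstate and ok_magic validates it.  The helper name example_footer_roundtrip
  is hypothetical; FOOTERS, !INSECURE, and an initialized mstate m are assumed.
*/
#if 0
static void example_footer_roundtrip(mstate m, mchunkptr p, size_t psize) {
  /* Size the chunk and write its footer (the FOOTERS version of this
     macro also calls mark_inuse_foot). */
  set_size_and_pinuse_of_inuse_chunk(m, p, psize);
  /* free()/realloc() later undo the XOR and check the result. */
  assert(get_mstate_for(p) == m);
  assert(ok_magic(get_mstate_for(p)));
}
#endif /* 0 */
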
  2458 /* ---------------------------- setting mparams -------------------------- */
  2459 
  2460 /* Initialize mparams */
  2461 static int init_mparams(void) {
  2462   if (mparams.page_size == 0) {
  2463     size_t s;
  2464 
  2465     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  2466     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  2467 #if MORECORE_CONTIGUOUS
  2468     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
  2469 #else  /* MORECORE_CONTIGUOUS */
  2470     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
  2471 #endif /* MORECORE_CONTIGUOUS */
  2472 
  2473 #if (FOOTERS && !INSECURE)
  2474     {
  2475 #if USE_DEV_RANDOM
  2476       int fd;
  2477       unsigned char buf[sizeof(size_t)];
  2478       /* Try to use /dev/urandom, else fall back on using time */
  2479       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
  2480           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
  2481         s = *((size_t *) buf);
  2482         close(fd);
  2483       }
  2484       else
  2485 #endif /* USE_DEV_RANDOM */
  2486         s = (size_t)(time(0) ^ (size_t)0x55555555U);
  2487 
  2488       s |= (size_t)8U;    /* ensure nonzero */
  2489       s &= ~(size_t)7U;   /* improve chances of fault for bad values */
  2490 
  2491     }
  2492 #else /* (FOOTERS && !INSECURE) */
  2493     s = (size_t)0x58585858U;
  2494 #endif /* (FOOTERS && !INSECURE) */
  2495     ACQUIRE_MAGIC_INIT_LOCK();
  2496     if (mparams.magic == 0) {
  2497       mparams.magic = s;
  2498       /* Set up lock for main malloc area */
  2499       INITIAL_LOCK(&gm->mutex);
  2500       gm->mflags = mparams.default_mflags;
  2501     }
  2502     RELEASE_MAGIC_INIT_LOCK();
  2503 
  2504 #ifndef WIN32
  2505     mparams.page_size = malloc_getpagesize;
  2506     mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
  2507                            DEFAULT_GRANULARITY : mparams.page_size);
  2508 #else /* WIN32 */
  2509     {
  2510       SYSTEM_INFO system_info;
  2511       GetSystemInfo(&system_info);
  2512       mparams.page_size = system_info.dwPageSize;
  2513       mparams.granularity = system_info.dwAllocationGranularity;
  2514     }
  2515 #endif /* WIN32 */
  2516 
  2517     /* Sanity-check configuration:
  2518        size_t must be unsigned and as wide as pointer type.
  2519        ints must be at least 4 bytes.
  2520        alignment must be at least 8.
  2521        Alignment, min chunk size, and page size must all be powers of 2.
  2522     */
  2523     if ((sizeof(size_t) != sizeof(char*)) ||
  2524         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
  2525         (sizeof(int) < 4)  ||
  2526         (MALLOC_ALIGNMENT < (size_t)8U) ||
  2527         ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
  2528         ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
  2529         ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
  2530         ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
  2531       ABORT;
  2532   }
  2533   return 0;
  2534 }
  2535 
  2536 /* support for mallopt */
  2537 static int change_mparam(int param_number, int value) {
  2538   size_t val = (size_t)value;
  2539   init_mparams();
  2540   switch(param_number) {
  2541   case M_TRIM_THRESHOLD:
  2542     mparams.trim_threshold = val;
  2543     return 1;
  2544   case M_GRANULARITY:
  2545     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
  2546       mparams.granularity = val;
  2547       return 1;
  2548     }
  2549     else
  2550       return 0;
  2551   case M_MMAP_THRESHOLD:
  2552     mparams.mmap_threshold = val;
  2553     return 1;
  2554   default:
  2555     return 0;
  2556   }
  2557 }
  2558 
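/*
  Illustrative sketch (not part of the allocator, never compiled): how the
  mallopt-style parameters map onto change_mparam.  M_GRANULARITY must be a
  power of two no smaller than the page size, so the second call is refused.
  The helper name example_change_mparam is hypothetical.
*/
#if 0
static void example_change_mparam(void) {
  int ok;
  ok = change_mparam(M_TRIM_THRESHOLD, 256 * 1024);  /* accepted: any value         */
  assert(ok == 1);
  ok = change_mparam(M_GRANULARITY, 12345);          /* refused: not a power of two */
  assert(ok == 0);
}
#endif /* 0 */
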
  2559 #if DEBUG
  2560 /* ------------------------- Debugging Support --------------------------- */
  2561 
  2562 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
  2563 static void do_check_any_chunk(mstate m, mchunkptr p) {
  2564   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2565   assert(ok_address(m, p));
  2566 }
  2567 
  2568 /* Check properties of top chunk */
  2569 static void do_check_top_chunk(mstate m, mchunkptr p) {
  2570   msegmentptr sp = segment_holding(m, (char*)p);
  2571   size_t  sz = chunksize(p);
  2572   assert(sp != 0);
  2573   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2574   assert(ok_address(m, p));
  2575   assert(sz == m->topsize);
  2576   assert(sz > 0);
  2577   assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  2578   assert(pinuse(p));
  2579   assert(!next_pinuse(p));
  2580 }
  2581 
  2582 /* Check properties of (inuse) mmapped chunks */
  2583 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  2584   size_t  sz = chunksize(p);
  2585   size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  2586   assert(is_mmapped(p));
  2587   assert(use_mmap(m));
  2588   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2589   assert(ok_address(m, p));
  2590   assert(!is_small(sz));
  2591   assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  2592   assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  2593   assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
  2594 }
  2595 
  2596 /* Check properties of inuse chunks */
  2597 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  2598   do_check_any_chunk(m, p);
  2599   assert(cinuse(p));
  2600   assert(next_pinuse(p));
  2601   /* If not pinuse and not mmapped, previous chunk has OK offset */
  2602   assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  2603   if (is_mmapped(p))
  2604     do_check_mmapped_chunk(m, p);
  2605 }
  2606 
  2607 /* Check properties of free chunks */
  2608 static void do_check_free_chunk(mstate m, mchunkptr p) {
  2609   size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2610   mchunkptr next = chunk_plus_offset(p, sz);
  2611   do_check_any_chunk(m, p);
  2612   assert(!cinuse(p));
  2613   assert(!next_pinuse(p));
  2614   assert (!is_mmapped(p));
  2615   if (p != m->dv && p != m->top) {
  2616     if (sz >= MIN_CHUNK_SIZE) {
  2617       assert((sz & CHUNK_ALIGN_MASK) == 0);
  2618       assert(is_aligned(chunk2mem(p)));
  2619       assert(next->prev_foot == sz);
  2620       assert(pinuse(p));
  2621       assert (next == m->top || cinuse(next));
  2622       assert(p->fd->bk == p);
  2623       assert(p->bk->fd == p);
  2624     }
  2625     else  /* markers are always of size SIZE_T_SIZE */
  2626       assert(sz == SIZE_T_SIZE);
  2627   }
  2628 }
  2629 
  2630 /* Check properties of malloced chunks at the point they are malloced */
  2631 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  2632   if (mem != 0) {
  2633     mchunkptr p = mem2chunk(mem);
  2634     size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2635     do_check_inuse_chunk(m, p);
  2636     assert((sz & CHUNK_ALIGN_MASK) == 0);
  2637     assert(sz >= MIN_CHUNK_SIZE);
  2638     assert(sz >= s);
  2639     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
  2640     assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  2641   }
  2642 }
  2643 
  2644 /* Check a tree and its subtrees.  */
  2645 static void do_check_tree(mstate m, tchunkptr t) {
  2646   tchunkptr head = 0;
  2647   tchunkptr u = t;
  2648   bindex_t tindex = t->index;
  2649   size_t tsize = chunksize(t);
  2650   bindex_t idx;
  2651   compute_tree_index(tsize, idx);
  2652   assert(tindex == idx);
  2653   assert(tsize >= MIN_LARGE_SIZE);
  2654   assert(tsize >= minsize_for_tree_index(idx));
  2655   assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
  2656 
  2657   do { /* traverse through chain of same-sized nodes */
  2658     do_check_any_chunk(m, ((mchunkptr)u));
  2659     assert(u->index == tindex);
  2660     assert(chunksize(u) == tsize);
  2661     assert(!cinuse(u));
  2662     assert(!next_pinuse(u));
  2663     assert(u->fd->bk == u);
  2664     assert(u->bk->fd == u);
  2665     if (u->parent == 0) {
  2666       assert(u->child[0] == 0);
  2667       assert(u->child[1] == 0);
  2668     }
  2669     else {
  2670       assert(head == 0); /* only one node on chain has parent */
  2671       head = u;
  2672       assert(u->parent != u);
  2673       assert (u->parent->child[0] == u ||
  2674               u->parent->child[1] == u ||
  2675               *((tbinptr*)(u->parent)) == u);
  2676       if (u->child[0] != 0) {
  2677         assert(u->child[0]->parent == u);
  2678         assert(u->child[0] != u);
  2679         do_check_tree(m, u->child[0]);
  2680       }
  2681       if (u->child[1] != 0) {
  2682         assert(u->child[1]->parent == u);
  2683         assert(u->child[1] != u);
  2684         do_check_tree(m, u->child[1]);
  2685       }
  2686       if (u->child[0] != 0 && u->child[1] != 0) {
  2687         assert(chunksize(u->child[0]) < chunksize(u->child[1]));
  2688       }
  2689     }
  2690     u = u->fd;
  2691   } while (u != t);
  2692   assert(head != 0);
  2693 }
  2694 
  2695 /*  Check all the chunks in a treebin.  */
  2696 static void do_check_treebin(mstate m, bindex_t i) {
  2697   tbinptr* tb = treebin_at(m, i);
  2698   tchunkptr t = *tb;
  2699   int empty = (m->treemap & (1U << i)) == 0;
  2700   if (t == 0)
  2701     assert(empty);
  2702   if (!empty)
  2703     do_check_tree(m, t);
  2704 }
  2705 
  2706 /*  Check all the chunks in a smallbin.  */
  2707 static void do_check_smallbin(mstate m, bindex_t i) {
  2708   sbinptr b = smallbin_at(m, i);
  2709   mchunkptr p = b->bk;
  2710   unsigned int empty = (m->smallmap & (1U << i)) == 0;
  2711   if (p == b)
  2712     assert(empty);
  2713   if (!empty) {
  2714     for (; p != b; p = p->bk) {
  2715       size_t size = chunksize(p);
  2716       mchunkptr q;
  2717       /* each chunk claims to be free */
  2718       do_check_free_chunk(m, p);
  2719       /* chunk belongs in bin */
  2720       assert(small_index(size) == i);
  2721       assert(p->bk == b || chunksize(p->bk) == chunksize(p));
  2722       /* chunk is followed by an inuse chunk */
  2723       q = next_chunk(p);
  2724       if (q->head != FENCEPOST_HEAD)
  2725         do_check_inuse_chunk(m, q);
  2726     }
  2727   }
  2728 }
  2729 
  2730 /* Find x in a bin. Used in other check functions. */
  2731 static int bin_find(mstate m, mchunkptr x) {
  2732   size_t size = chunksize(x);
  2733   if (is_small(size)) {
  2734     bindex_t sidx = small_index(size);
  2735     sbinptr b = smallbin_at(m, sidx);
  2736     if (smallmap_is_marked(m, sidx)) {
  2737       mchunkptr p = b;
  2738       do {
  2739         if (p == x)
  2740           return 1;
  2741       } while ((p = p->fd) != b);
  2742     }
  2743   }
  2744   else {
  2745     bindex_t tidx;
  2746     compute_tree_index(size, tidx);
  2747     if (treemap_is_marked(m, tidx)) {
  2748       tchunkptr t = *treebin_at(m, tidx);
  2749       size_t sizebits = size << leftshift_for_tree_index(tidx);
  2750       while (t != 0 && chunksize(t) != size) {
  2751         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  2752         sizebits <<= 1;
  2753       }
  2754       if (t != 0) {
  2755         tchunkptr u = t;
  2756         do {
  2757           if (u == (tchunkptr)x)
  2758             return 1;
  2759         } while ((u = u->fd) != t);
  2760       }
  2761     }
  2762   }
  2763   return 0;
  2764 }
  2765 
  2766 /* Traverse each chunk and check it; return total */
  2767 static size_t traverse_and_check(mstate m) {
  2768   size_t sum = 0;
  2769   if (is_initialized(m)) {
  2770     msegmentptr s = &m->seg;
  2771     sum += m->topsize + TOP_FOOT_SIZE;
  2772     while (s != 0) {
  2773       mchunkptr q = align_as_chunk(s->base);
  2774       mchunkptr lastq = 0;
  2775       assert(pinuse(q));
  2776       while (segment_holds(s, q) &&
  2777              q != m->top && q->head != FENCEPOST_HEAD) {
  2778         sum += chunksize(q);
  2779         if (cinuse(q)) {
  2780           assert(!bin_find(m, q));
  2781           do_check_inuse_chunk(m, q);
  2782         }
  2783         else {
  2784           assert(q == m->dv || bin_find(m, q));
  2785           assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
  2786           do_check_free_chunk(m, q);
  2787         }
  2788         lastq = q;
  2789         q = next_chunk(q);
  2790       }
  2791       s = s->next;
  2792     }
  2793   }
  2794   return sum;
  2795 }
  2796 
  2797 /* Check all properties of malloc_state. */
  2798 static void do_check_malloc_state(mstate m) {
  2799   bindex_t i;
  2800   size_t total;
  2801   /* check bins */
  2802   for (i = 0; i < NSMALLBINS; ++i)
  2803     do_check_smallbin(m, i);
  2804   for (i = 0; i < NTREEBINS; ++i)
  2805     do_check_treebin(m, i);
  2806 
  2807   if (m->dvsize != 0) { /* check dv chunk */
  2808     do_check_any_chunk(m, m->dv);
  2809     assert(m->dvsize == chunksize(m->dv));
  2810     assert(m->dvsize >= MIN_CHUNK_SIZE);
  2811     assert(bin_find(m, m->dv) == 0);
  2812   }
  2813 
  2814   if (m->top != 0) {   /* check top chunk */
  2815     do_check_top_chunk(m, m->top);
  2816     assert(m->topsize == chunksize(m->top));
  2817     assert(m->topsize > 0);
  2818     assert(bin_find(m, m->top) == 0);
  2819   }
  2820 
  2821   total = traverse_and_check(m);
  2822   assert(total <= m->footprint);
  2823   assert(m->footprint <= m->max_footprint);
  2824 }
  2825 #endif /* DEBUG */
  2826 
  2827 /* ----------------------------- statistics ------------------------------ */
  2828 
  2829 #if !NO_MALLINFO
  2830 static struct mallinfo internal_mallinfo(mstate m) {
  2831   struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  2832   if (!PREACTION(m)) {
  2833     check_malloc_state(m);
  2834     if (is_initialized(m)) {
  2835       size_t nfree = SIZE_T_ONE; /* top always free */
  2836       size_t mfree = m->topsize + TOP_FOOT_SIZE;
  2837       size_t sum = mfree;
  2838       msegmentptr s = &m->seg;
  2839       while (s != 0) {
  2840         mchunkptr q = align_as_chunk(s->base);
  2841         while (segment_holds(s, q) &&
  2842                q != m->top && q->head != FENCEPOST_HEAD) {
  2843           size_t sz = chunksize(q);
  2844           sum += sz;
  2845           if (!cinuse(q)) {
  2846             mfree += sz;
  2847             ++nfree;
  2848           }
  2849           q = next_chunk(q);
  2850         }
  2851         s = s->next;
  2852       }
  2853 
  2854       nm.arena    = sum;
  2855       nm.ordblks  = nfree;
  2856       nm.hblkhd   = m->footprint - sum;
  2857       nm.usmblks  = m->max_footprint;
  2858       nm.uordblks = m->footprint - mfree;
  2859       nm.fordblks = mfree;
  2860       nm.keepcost = m->topsize;
  2861     }
  2862 
  2863     POSTACTION(m);
  2864   }
  2865   return nm;
  2866 }
  2867 #endif /* !NO_MALLINFO */
  2868 
  2869 static void internal_malloc_stats(mstate m) {
  2870   if (!PREACTION(m)) {
  2871     size_t maxfp = 0;
  2872     size_t fp = 0;
  2873     size_t used = 0;
  2874     check_malloc_state(m);
  2875     if (is_initialized(m)) {
  2876       msegmentptr s = &m->seg;
  2877       maxfp = m->max_footprint;
  2878       fp = m->footprint;
  2879       used = fp - (m->topsize + TOP_FOOT_SIZE);
  2880 
  2881       while (s != 0) {
  2882         mchunkptr q = align_as_chunk(s->base);
  2883         while (segment_holds(s, q) &&
  2884                q != m->top && q->head != FENCEPOST_HEAD) {
  2885           if (!cinuse(q))
  2886             used -= chunksize(q);
  2887           q = next_chunk(q);
  2888         }
  2889         s = s->next;
  2890       }
  2891     }
  2892 
  2893 #ifndef LACKS_STDIO_H
  2894     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
  2895     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
  2896     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
  2897 #endif
  2898 
  2899     POSTACTION(m);
  2900   }
  2901 }
  2902 
  2903 /* ----------------------- Operations on smallbins ----------------------- */
  2904 
  2905 /*
  2906   Various forms of linking and unlinking are defined as macros, even
  2907   the ones for trees, which are very long but have very short typical
  2908   paths.  This is ugly, but it reduces reliance on the inlining support
  2909   of compilers.
  2910 */
  2911 
  2912 /* Link a free chunk into a smallbin  */
  2913 #define insert_small_chunk(M, P, S) {\
  2914   bindex_t I  = small_index(S);\
  2915   mchunkptr B = smallbin_at(M, I);\
  2916   mchunkptr F = B;\
  2917   assert(S >= MIN_CHUNK_SIZE);\
  2918   if (!smallmap_is_marked(M, I))\
  2919     mark_smallmap(M, I);\
  2920   else if (RTCHECK(ok_address(M, B->fd)))\
  2921     F = B->fd;\
  2922   else {\
  2923     CORRUPTION_ERROR_ACTION(M);\
  2924   }\
  2925   B->fd = P;\
  2926   F->bk = P;\
  2927   P->fd = F;\
  2928   P->bk = B;\
  2929 }
  2930 
  2931 /* Unlink a chunk from a smallbin  */
  2932 #define unlink_small_chunk(M, P, S) {\
  2933   mchunkptr F = P->fd;\
  2934   mchunkptr B = P->bk;\
  2935   bindex_t I = small_index(S);\
  2936   assert(P != B);\
  2937   assert(P != F);\
  2938   assert(chunksize(P) == small_index2size(I));\
  2939   if (F == B)\
  2940     clear_smallmap(M, I);\
  2941   else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
  2942                    (B == smallbin_at(M,I) || ok_address(M, B)))) {\
  2943     F->bk = B;\
  2944     B->fd = F;\
  2945   }\
  2946   else {\
  2947     CORRUPTION_ERROR_ACTION(M);\
  2948   }\
  2949 }
  2950 
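/*
  Illustrative sketch (not part of the allocator, never compiled): the
  smallbin macros above keep each bin as a circular doubly-linked list
  headed by the bin pseudo-chunk itself, so insertion and removal preserve
  the fd/bk round-trip invariants checked in do_check_free_chunk.  The
  helper name example_smallbin_links is hypothetical.
*/
#if 0
static void example_smallbin_links(mstate m, mchunkptr p, size_t s) {
  mchunkptr b = smallbin_at(m, small_index(s));
  insert_small_chunk(m, p, s);
  assert(b->fd == p && p->bk == b);          /* newest chunk sits at the front  */
  assert(p->fd->bk == p && p->bk->fd == p);  /* links are mutually consistent   */
  unlink_small_chunk(m, p, s);               /* restores the previous bin state */
}
#endif /* 0 */
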
  2951 /* Unlink the first chunk from a smallbin */
  2952 #define unlink_first_small_chunk(M, B, P, I) {\
  2953   mchunkptr F = P->fd;\
  2954   assert(P != B);\
  2955   assert(P != F);\
  2956   assert(chunksize(P) == small_index2size(I));\
  2957   if (B == F)\
  2958     clear_smallmap(M, I);\
  2959   else if (RTCHECK(ok_address(M, F))) {\
  2960     B->fd = F;\
  2961     F->bk = B;\
  2962   }\
  2963   else {\
  2964     CORRUPTION_ERROR_ACTION(M);\
  2965   }\
  2966 }
  2967 
  2968 /* Replace dv node, binning the old one */
  2969 /* Used only when dvsize known to be small */
  2970 #define replace_dv(M, P, S) {\
  2971   size_t DVS = M->dvsize;\
  2972   if (DVS != 0) {\
  2973     mchunkptr DV = M->dv;\
  2974     assert(is_small(DVS));\
  2975     insert_small_chunk(M, DV, DVS);\
  2976   }\
  2977   M->dvsize = S;\
  2978   M->dv = P;\
  2979 }
  2980 
  2981 /* ------------------------- Operations on trees ------------------------- */
  2982 
  2983 /* Insert chunk into tree */
  2984 #define insert_large_chunk(M, X, S) {\
  2985   tbinptr* H;\
  2986   bindex_t I;\
  2987   compute_tree_index(S, I);\
  2988   H = treebin_at(M, I);\
  2989   X->index = I;\
  2990   X->child[0] = X->child[1] = 0;\
  2991   if (!treemap_is_marked(M, I)) {\
  2992     mark_treemap(M, I);\
  2993     *H = X;\
  2994     X->parent = (tchunkptr)H;\
  2995     X->fd = X->bk = X;\
  2996   }\
  2997   else {\
  2998     tchunkptr T = *H;\
  2999     size_t K = S << leftshift_for_tree_index(I);\
  3000     for (;;) {\
  3001       if (chunksize(T) != S) {\
  3002         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
  3003         K <<= 1;\
  3004         if (*C != 0)\
  3005           T = *C;\
  3006         else if (RTCHECK(ok_address(M, C))) {\
  3007           *C = X;\
  3008           X->parent = T;\
  3009           X->fd = X->bk = X;\
  3010           break;\
  3011         }\
  3012         else {\
  3013           CORRUPTION_ERROR_ACTION(M);\
  3014           break;\
  3015         }\
  3016       }\
  3017       else {\
  3018         tchunkptr F = T->fd;\
  3019         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
  3020           T->fd = F->bk = X;\
  3021           X->fd = F;\
  3022           X->bk = T;\
  3023           X->parent = 0;\
  3024           break;\
  3025         }\
  3026         else {\
  3027           CORRUPTION_ERROR_ACTION(M);\
  3028           break;\
  3029         }\
  3030       }\
  3031     }\
  3032   }\
  3033 }
  3034 
  3035 /*
  3036   Unlink steps:
  3037 
  3038   1. If x is a chained node, unlink it from its same-sized fd/bk links
  3039      and choose its bk node as its replacement.
  3040   2. If x was the last node of its size, but not a leaf node, it must
  3041      be replaced with a leaf node (not merely one with an open left or
  3042      right), to make sure that lefts and rights of descendants
  3043      correspond properly to bit masks.  We use the rightmost descendant
  3044      of x.  We could use any other leaf, but this is easy to locate and
  3045      tends to counteract removal of leftmosts elsewhere, and so keeps
  3046      paths shorter than minimally guaranteed.  This doesn't loop much
  3047      because on average a node in a tree is near the bottom.
  3048   3. If x is the base of a chain (i.e., has parent links) relink
  3049      x's parent and children to x's replacement (or null if none).
  3050 */
  3051 
  3052 #define unlink_large_chunk(M, X) {\
  3053   tchunkptr XP = X->parent;\
  3054   tchunkptr R;\
  3055   if (X->bk != X) {\
  3056     tchunkptr F = X->fd;\
  3057     R = X->bk;\
  3058     if (RTCHECK(ok_address(M, F))) {\
  3059       F->bk = R;\
  3060       R->fd = F;\
  3061     }\
  3062     else {\
  3063       CORRUPTION_ERROR_ACTION(M);\
  3064     }\
  3065   }\
  3066   else {\
  3067     tchunkptr* RP;\
  3068     if (((R = *(RP = &(X->child[1]))) != 0) ||\
  3069         ((R = *(RP = &(X->child[0]))) != 0)) {\
  3070       tchunkptr* CP;\
  3071       while ((*(CP = &(R->child[1])) != 0) ||\
  3072              (*(CP = &(R->child[0])) != 0)) {\
  3073         R = *(RP = CP);\
  3074       }\
  3075       if (RTCHECK(ok_address(M, RP)))\
  3076         *RP = 0;\
  3077       else {\
  3078         CORRUPTION_ERROR_ACTION(M);\
  3079       }\
  3080     }\
  3081   }\
  3082   if (XP != 0) {\
  3083     tbinptr* H = treebin_at(M, X->index);\
  3084     if (X == *H) {\
  3085       if ((*H = R) == 0) \
  3086         clear_treemap(M, X->index);\
  3087     }\
  3088     else if (RTCHECK(ok_address(M, XP))) {\
  3089       if (XP->child[0] == X) \
  3090         XP->child[0] = R;\
  3091       else \
  3092         XP->child[1] = R;\
  3093     }\
  3094     else\
  3095       CORRUPTION_ERROR_ACTION(M);\
  3096     if (R != 0) {\
  3097       if (RTCHECK(ok_address(M, R))) {\
  3098         tchunkptr C0, C1;\
  3099         R->parent = XP;\
  3100         if ((C0 = X->child[0]) != 0) {\
  3101           if (RTCHECK(ok_address(M, C0))) {\
  3102             R->child[0] = C0;\
  3103             C0->parent = R;\
  3104           }\
  3105           else\
  3106             CORRUPTION_ERROR_ACTION(M);\
  3107         }\
  3108         if ((C1 = X->child[1]) != 0) {\
  3109           if (RTCHECK(ok_address(M, C1))) {\
  3110             R->child[1] = C1;\
  3111             C1->parent = R;\
  3112           }\
  3113           else\
  3114             CORRUPTION_ERROR_ACTION(M);\
  3115         }\
  3116       }\
  3117       else\
  3118         CORRUPTION_ERROR_ACTION(M);\
  3119     }\
  3120   }\
  3121 }
  3122 
  3123 /* Relays to large vs small bin operations */
  3124 
  3125 #define insert_chunk(M, P, S)\
  3126   if (is_small(S)) insert_small_chunk(M, P, S)\
  3127   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
  3128 
  3129 #define unlink_chunk(M, P, S)\
  3130   if (is_small(S)) unlink_small_chunk(M, P, S)\
  3131   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
  3132 
  3133 
  3134 /* Relays to internal calls to malloc/free from realloc, memalign etc */
  3135 
  3136 #if ONLY_MSPACES
  3137 #define internal_malloc(m, b) mspace_malloc(m, b)
  3138 #define internal_free(m, mem) mspace_free(m,mem);
  3139 #else /* ONLY_MSPACES */
  3140 #if MSPACES
  3141 #define internal_malloc(m, b)\
  3142    (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
  3143 #define internal_free(m, mem)\
  3144    if (m == gm) dlfree(mem); else mspace_free(m,mem);
  3145 #else /* MSPACES */
  3146 #define internal_malloc(m, b) dlmalloc(b)
  3147 #define internal_free(m, mem) dlfree(mem)
  3148 #endif /* MSPACES */
  3149 #endif /* ONLY_MSPACES */
  3150 
  3151 /* -----------------------  Direct-mmapping chunks ----------------------- */
  3152 
  3153 /*
  3154   Directly mmapped chunks are set up with an offset to the start of
  3155   the mmapped region stored in the prev_foot field of the chunk. This
  3156   allows reconstruction of the required argument to MUNMAP when freed,
  3157   and also allows adjustment of the returned chunk to meet alignment
  3158   requirements (especially in memalign).  There is also enough space
  3159   allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
  3160   the PINUSE bit so frees can be checked.
  3161 */
  3162 
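/*
  Illustrative sketch (not part of the allocator, never compiled): how the
  offset stashed in prev_foot lets free() rebuild the exact region handed to
  MUNMAP.  The helper name example_unmap_mmapped_chunk is hypothetical.
*/
#if 0
static void example_unmap_mmapped_chunk(mchunkptr p) {
  size_t offset  = p->prev_foot & ~IS_MMAPPED_BIT;  /* distance back to the mapping base */
  char*  base    = (char*)p - offset;
  size_t mapsize = chunksize(p) + offset + MMAP_FOOT_PAD;
  CALL_MUNMAP(base, mapsize);                       /* the region mmap_alloc obtained    */
}
#endif /* 0 */
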
  3163 /* Malloc using mmap */
  3164 static void* mmap_alloc(mstate m, size_t nb) {
  3165   size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3166   if (mmsize > nb) {     /* Check for wrap around 0 */
  3167     char* mm = (char*)(DIRECT_MMAP(mmsize));
  3168     if (mm != CMFAIL) {
  3169       size_t offset = align_offset(chunk2mem(mm));
  3170       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  3171       mchunkptr p = (mchunkptr)(mm + offset);
  3172       p->prev_foot = offset | IS_MMAPPED_BIT;
  3173       (p)->head = (psize|CINUSE_BIT);
  3174       mark_inuse_foot(m, p, psize);
  3175       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  3176       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
  3177 
  3178       if (mm < m->least_addr)
  3179         m->least_addr = mm;
  3180       if ((m->footprint += mmsize) > m->max_footprint)
  3181         m->max_footprint = m->footprint;
  3182       assert(is_aligned(chunk2mem(p)));
  3183       check_mmapped_chunk(m, p);
  3184       return chunk2mem(p);
  3185     }
  3186   }
  3187   return 0;
  3188 }
  3189 
  3190 /* Realloc using mmap */
  3191 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
  3192   size_t oldsize = chunksize(oldp);
  3193   if (is_small(nb)) /* Can't shrink mmap regions below small size */
  3194     return 0;
  3195   /* Keep old chunk if big enough but not too big */
  3196   if (oldsize >= nb + SIZE_T_SIZE &&
  3197       (oldsize - nb) <= (mparams.granularity << 1))
  3198     return oldp;
  3199   else {
  3200     size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
  3201     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
  3202     size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
  3203                                          CHUNK_ALIGN_MASK);
  3204     char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
  3205                                   oldmmsize, newmmsize, 1);
  3206     if (cp != CMFAIL) {
  3207       mchunkptr newp = (mchunkptr)(cp + offset);
  3208       size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
  3209       newp->head = (psize|CINUSE_BIT);
  3210       mark_inuse_foot(m, newp, psize);
  3211       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
  3212       chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
  3213 
  3214       if (cp < m->least_addr)
  3215         m->least_addr = cp;
  3216       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
  3217         m->max_footprint = m->footprint;
  3218       check_mmapped_chunk(m, newp);
  3219       return newp;
  3220     }
  3221   }
  3222   return 0;
  3223 }
  3224 
  3225 /* -------------------------- mspace management -------------------------- */
  3226 
  3227 /* Initialize top chunk and its size */
  3228 static void init_top(mstate m, mchunkptr p, size_t psize) {
  3229   /* Ensure alignment */
  3230   size_t offset = align_offset(chunk2mem(p));
  3231   p = (mchunkptr)((char*)p + offset);
  3232   psize -= offset;
  3233 
  3234   m->top = p;
  3235   m->topsize = psize;
  3236   p->head = psize | PINUSE_BIT;
  3237   /* set size of fake trailing chunk holding overhead space only once */
  3238   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  3239   m->trim_check = mparams.trim_threshold; /* reset on each update */
  3240 }
  3241 
  3242 /* Initialize bins for a new mstate that is otherwise zeroed out */
  3243 static void init_bins(mstate m) {
  3244   /* Establish circular links for smallbins */
  3245   bindex_t i;
  3246   for (i = 0; i < NSMALLBINS; ++i) {
  3247     sbinptr bin = smallbin_at(m,i);
  3248     bin->fd = bin->bk = bin;
  3249   }
  3250 }
  3251 
  3252 #if PROCEED_ON_ERROR
  3253 
  3254 /* default corruption action */
  3255 static void reset_on_error(mstate m) {
  3256   int i;
  3257   ++malloc_corruption_error_count;
  3258   /* Reinitialize fields to forget about all memory */
  3259   m->smallbins = m->treebins = 0;
  3260   m->dvsize = m->topsize = 0;
  3261   m->seg.base = 0;
  3262   m->seg.size = 0;
  3263   m->seg.next = 0;
  3264   m->top = m->dv = 0;
  3265   for (i = 0; i < NTREEBINS; ++i)
  3266     *treebin_at(m, i) = 0;
  3267   init_bins(m);
  3268 }
  3269 #endif /* PROCEED_ON_ERROR */
  3270 
  3271 /* Allocate a chunk at newbase and consolidate the remainder with the first chunk at the successor base (oldbase). */
  3272 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
  3273                            size_t nb) {
  3274   mchunkptr p = align_as_chunk(newbase);
  3275   mchunkptr oldfirst = align_as_chunk(oldbase);
  3276   size_t psize = (char*)oldfirst - (char*)p;
  3277   mchunkptr q = chunk_plus_offset(p, nb);
  3278   size_t qsize = psize - nb;
  3279   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3280 
  3281   assert((char*)oldfirst > (char*)q);
  3282   assert(pinuse(oldfirst));
  3283   assert(qsize >= MIN_CHUNK_SIZE);
  3284 
  3285   /* consolidate remainder with first chunk of old base */
  3286   if (oldfirst == m->top) {
  3287     size_t tsize = m->topsize += qsize;
  3288     m->top = q;
  3289     q->head = tsize | PINUSE_BIT;
  3290     check_top_chunk(m, q);
  3291   }
  3292   else if (oldfirst == m->dv) {
  3293     size_t dsize = m->dvsize += qsize;
  3294     m->dv = q;
  3295     set_size_and_pinuse_of_free_chunk(q, dsize);
  3296   }
  3297   else {
  3298     if (!cinuse(oldfirst)) {
  3299       size_t nsize = chunksize(oldfirst);
  3300       unlink_chunk(m, oldfirst, nsize);
  3301       oldfirst = chunk_plus_offset(oldfirst, nsize);
  3302       qsize += nsize;
  3303     }
  3304     set_free_with_pinuse(q, qsize, oldfirst);
  3305     insert_chunk(m, q, qsize);
  3306     check_free_chunk(m, q);
  3307   }
  3308 
  3309   check_malloced_chunk(m, chunk2mem(p), nb);
  3310   return chunk2mem(p);
  3311 }
  3312 
  3313 
  3314 /* Add a segment to hold a new noncontiguous region */
  3315 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  3316   /* Determine locations and sizes of segment, fenceposts, old top */
  3317   char* old_top = (char*)m->top;
  3318   msegmentptr oldsp = segment_holding(m, old_top);
  3319   char* old_end = oldsp->base + oldsp->size;
  3320   size_t ssize = pad_request(sizeof(struct malloc_segment));
  3321   char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3322   size_t offset = align_offset(chunk2mem(rawsp));
  3323   char* asp = rawsp + offset;
  3324   char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  3325   mchunkptr sp = (mchunkptr)csp;
  3326   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  3327   mchunkptr tnext = chunk_plus_offset(sp, ssize);
  3328   mchunkptr p = tnext;
  3329   int nfences = 0;
  3330 
  3331   /* reset top to new space */
  3332   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3333 
  3334   /* Set up segment record */
  3335   assert(is_aligned(ss));
  3336   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  3337   *ss = m->seg; /* Push current record */
  3338   m->seg.base = tbase;
  3339   m->seg.size = tsize;
  3340   m->seg.sflags = mmapped;
  3341   m->seg.next = ss;
  3342 
  3343   /* Insert trailing fenceposts */
  3344   for (;;) {
  3345     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  3346     p->head = FENCEPOST_HEAD;
  3347     ++nfences;
  3348     if ((char*)(&(nextp->head)) < old_end)
  3349       p = nextp;
  3350     else
  3351       break;
  3352   }
  3353   assert(nfences >= 2);
  3354 
  3355   /* Insert the rest of old top into a bin as an ordinary free chunk */
  3356   if (csp != old_top) {
  3357     mchunkptr q = (mchunkptr)old_top;
  3358     size_t psize = csp - old_top;
  3359     mchunkptr tn = chunk_plus_offset(q, psize);
  3360     set_free_with_pinuse(q, psize, tn);
  3361     insert_chunk(m, q, psize);
  3362   }
  3363 
  3364   check_top_chunk(m, m->top);
  3365 }
  3366 
  3367 /* -------------------------- System allocation -------------------------- */
  3368 
  3369 /* Get memory from system using MORECORE or MMAP */
  3370 static void* sys_alloc(mstate m, size_t nb) {
  3371   char* tbase = CMFAIL;
  3372   size_t tsize = 0;
  3373   flag_t mmap_flag = 0;
  3374 
  3375   init_mparams();
  3376 
  3377   /* Directly map large chunks */
  3378   if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  3379     void* mem = mmap_alloc(m, nb);
  3380     if (mem != 0)
  3381       return mem;
  3382   }
  3383 
  3384   /*
  3385     Try getting memory in any of three ways (in most-preferred to
  3386     least-preferred order):
  3387     1. A call to MORECORE that can normally contiguously extend memory.
  3388        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
  3389        main space is mmapped or a previous contiguous call failed)
  3390     2. A call to MMAP new space (disabled if not HAVE_MMAP).
  3391        Note that under the default settings, if MORECORE is unable to
  3392        fulfill a request, and HAVE_MMAP is true, then mmap is
  3393        used as a noncontiguous system allocator. This is a useful backup
  3394        strategy for systems with holes in address spaces -- in this case
  3395        sbrk cannot contiguously expand the heap, but mmap may be able to
  3396        find space.
  3397     3. A call to MORECORE that cannot usually contiguously extend memory.
  3398        (disabled if not HAVE_MORECORE)
  3399   */
  3400 
  3401   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  3402     char* br = CMFAIL;
  3403     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
  3404     size_t asize = 0;
  3405     ACQUIRE_MORECORE_LOCK();
  3406 
  3407     if (ss == 0) {  /* First time through or recovery */
  3408       char* base = (char*)CALL_MORECORE(0);
  3409       if (base != CMFAIL) {
  3410         asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3411         /* Adjust to end on a page boundary */
  3412         if (!is_page_aligned(base))
  3413           asize += (page_align((size_t)base) - (size_t)base);
  3414         /* Can't call MORECORE if size is negative when treated as signed */
  3415         if (asize < HALF_MAX_SIZE_T &&
  3416             (br = (char*)(CALL_MORECORE(asize))) == base) {
  3417           tbase = base;
  3418           tsize = asize;
  3419         }
  3420       }
  3421     }
  3422     else {
  3423       /* Subtract out existing available top space from MORECORE request. */
  3424       asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
  3425       /* Use mem here only if it did contiguously extend old space */
  3426       if (asize < HALF_MAX_SIZE_T &&
  3427           (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
  3428         tbase = br;
  3429         tsize = asize;
  3430       }
  3431     }
  3432 
  3433     if (tbase == CMFAIL) {    /* Cope with partial failure */
  3434       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
  3435         if (asize < HALF_MAX_SIZE_T &&
  3436             asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  3437           size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
  3438           if (esize < HALF_MAX_SIZE_T) {
  3439             char* end = (char*)CALL_MORECORE(esize);
  3440             if (end != CMFAIL)
  3441               asize += esize;
  3442             else {            /* Can't use; try to release */
  3443               end = (char*)CALL_MORECORE(-asize);
  3444               br = CMFAIL;
  3445             }
  3446           }
  3447         }
  3448       }
  3449       if (br != CMFAIL) {    /* Use the space we did get */
  3450         tbase = br;
  3451         tsize = asize;
  3452       }
  3453       else
  3454         disable_contiguous(m); /* Don't try contiguous path in the future */
  3455     }
  3456 
  3457     RELEASE_MORECORE_LOCK();
  3458   }
  3459 
  3460   if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
  3461     size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
  3462     size_t rsize = granularity_align(req);
  3463     if (rsize > nb) { /* Fail if wraps around zero */
  3464       char* mp = (char*)(CALL_MMAP(rsize));
  3465       if (mp != CMFAIL) {
  3466         tbase = mp;
  3467         tsize = rsize;
  3468         mmap_flag = IS_MMAPPED_BIT;
  3469       }
  3470     }
  3471   }
  3472 
  3473   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
  3474     size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3475     if (asize < HALF_MAX_SIZE_T) {
  3476       char* br = CMFAIL;
  3477       char* end = CMFAIL;
  3478       ACQUIRE_MORECORE_LOCK();
  3479       br = (char*)(CALL_MORECORE(asize));
  3480       end = (char*)(CALL_MORECORE(0));
  3481       RELEASE_MORECORE_LOCK();
  3482       if (br != CMFAIL && end != CMFAIL && br < end) {
  3483         size_t ssize = end - br;
  3484         if (ssize > nb + TOP_FOOT_SIZE) {
  3485           tbase = br;
  3486           tsize = ssize;
  3487         }
  3488       }
  3489     }
  3490   }
  3491 
  3492   if (tbase != CMFAIL) {
  3493 
  3494     if ((m->footprint += tsize) > m->max_footprint)
  3495       m->max_footprint = m->footprint;
  3496 
  3497     if (!is_initialized(m)) { /* first-time initialization */
  3498       m->seg.base = m->least_addr = tbase;
  3499       m->seg.size = tsize;
  3500       m->seg.sflags = mmap_flag;
  3501       m->magic = mparams.magic;
  3502       init_bins(m);
  3503       if (is_global(m)) 
  3504         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3505       else {
  3506         /* Offset top by embedded malloc_state */
  3507         mchunkptr mn = next_chunk(mem2chunk(m));
  3508         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
  3509       }
  3510     }
  3511 
  3512     else {
  3513       /* Try to merge with an existing segment */
  3514       msegmentptr sp = &m->seg;
  3515       while (sp != 0 && tbase != sp->base + sp->size)
  3516         sp = sp->next;
  3517       if (sp != 0 &&
  3518           !is_extern_segment(sp) &&
  3519           (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
  3520           segment_holds(sp, m->top)) { /* append */
  3521         sp->size += tsize;
  3522         init_top(m, m->top, m->topsize + tsize);
  3523       }
  3524       else {
  3525         if (tbase < m->least_addr)
  3526           m->least_addr = tbase;
  3527         sp = &m->seg;
  3528         while (sp != 0 && sp->base != tbase + tsize)
  3529           sp = sp->next;
  3530         if (sp != 0 &&
  3531             !is_extern_segment(sp) &&
  3532             (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
  3533           char* oldbase = sp->base;
  3534           sp->base = tbase;
  3535           sp->size += tsize;
  3536           return prepend_alloc(m, tbase, oldbase, nb);
  3537         }
  3538         else
  3539           add_segment(m, tbase, tsize, mmap_flag);
  3540       }
  3541     }
  3542 
  3543     if (nb < m->topsize) { /* Allocate from new or extended top space */
  3544       size_t rsize = m->topsize -= nb;
  3545       mchunkptr p = m->top;
  3546       mchunkptr r = m->top = chunk_plus_offset(p, nb);
  3547       r->head = rsize | PINUSE_BIT;
  3548       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3549       check_top_chunk(m, m->top);
  3550       check_malloced_chunk(m, chunk2mem(p), nb);
  3551       return chunk2mem(p);
  3552     }
  3553   }
  3554 
  3555   MALLOC_FAILURE_ACTION;
  3556   return 0;
  3557 }
  3558 
  3559 /* -----------------------  system deallocation -------------------------- */
  3560 
  3561 /* Unmap and unlink any mmapped segments that don't contain used chunks */
  3562 static size_t release_unused_segments(mstate m) {
  3563   size_t released = 0;
  3564   msegmentptr pred = &m->seg;
  3565   msegmentptr sp = pred->next;
  3566   while (sp != 0) {
  3567     char* base = sp->base;
  3568     size_t size = sp->size;
  3569     msegmentptr next = sp->next;
  3570     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
  3571       mchunkptr p = align_as_chunk(base);
  3572       size_t psize = chunksize(p);
  3573       /* Can unmap if first chunk holds entire segment and not pinned */
  3574       if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
  3575         tchunkptr tp = (tchunkptr)p;
  3576         assert(segment_holds(sp, (char*)sp));
  3577         if (p == m->dv) {
  3578           m->dv = 0;
  3579           m->dvsize = 0;
  3580         }
  3581         else {
  3582           unlink_large_chunk(m, tp);
  3583         }
  3584         if (CALL_MUNMAP(base, size) == 0) {
  3585           released += size;
  3586           m->footprint -= size;
  3587           /* unlink obsoleted record */
  3588           sp = pred;
  3589           sp->next = next;
  3590         }
  3591         else { /* back out if cannot unmap */
  3592           insert_large_chunk(m, tp, psize);
  3593         }
  3594       }
  3595     }
  3596     pred = sp;
  3597     sp = next;
  3598   }
  3599   return released;
  3600 }
  3601 
  3602 static int sys_trim(mstate m, size_t pad) {
  3603   size_t released = 0;
  3604   if (pad < MAX_REQUEST && is_initialized(m)) {
  3605     pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
  3606 
  3607     if (m->topsize > pad) {
  3608       /* Shrink top space in granularity-size units, keeping at least one */
  3609       size_t unit = mparams.granularity;
  3610       size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
  3611                       SIZE_T_ONE) * unit;
  3612       msegmentptr sp = segment_holding(m, (char*)m->top);
  3613 
  3614       if (!is_extern_segment(sp)) {
  3615         if (is_mmapped_segment(sp)) {
  3616           if (HAVE_MMAP &&
  3617               sp->size >= extra &&
  3618               !has_segment_link(m, sp)) { /* can't shrink if pinned */
  3619             size_t newsize = sp->size - extra;
  3620             /* Prefer mremap, fall back to munmap */
  3621             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
  3622                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
  3623               released = extra;
  3624             }
  3625           }
  3626         }
  3627         else if (HAVE_MORECORE) {
  3628           if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
  3629             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
  3630           ACQUIRE_MORECORE_LOCK();
  3631           {
  3632             /* Make sure end of memory is where we last set it. */
  3633             char* old_br = (char*)(CALL_MORECORE(0));
  3634             if (old_br == sp->base + sp->size) {
  3635               char* rel_br = (char*)(CALL_MORECORE(-extra));
  3636               char* new_br = (char*)(CALL_MORECORE(0));
  3637               if (rel_br != CMFAIL && new_br < old_br)
  3638                 released = old_br - new_br;
  3639             }
  3640           }
  3641           RELEASE_MORECORE_LOCK();
  3642         }
  3643       }
  3644 
  3645       if (released != 0) {
  3646         sp->size -= released;
  3647         m->footprint -= released;
  3648         init_top(m, m->top, m->topsize - released);
  3649         check_top_chunk(m, m->top);
  3650       }
  3651     }
  3652 
  3653     /* Unmap any unused mmapped segments */
  3654     if (HAVE_MMAP) 
  3655       released += release_unused_segments(m);
  3656 
  3657     /* On failure, disable autotrim to avoid repeated failed future calls */
  3658     if (released == 0)
  3659       m->trim_check = MAX_SIZE_T;
  3660   }
  3661 
  3662   return (released != 0)? 1 : 0;
  3663 }
  3664 
  3665 /* ---------------------------- malloc support --------------------------- */
  3666 
  3667 /* allocate a large request from the best fitting chunk in a treebin */
  3668 static void* tmalloc_large(mstate m, size_t nb) {
  3669   tchunkptr v = 0;
  3670   size_t rsize = -nb; /* Unsigned negation */
  3671   tchunkptr t;
  3672   bindex_t idx;
  3673   compute_tree_index(nb, idx);
  3674 
  3675   if ((t = *treebin_at(m, idx)) != 0) {
  3676     /* Traverse tree for this bin looking for node with size == nb */
  3677     size_t sizebits = nb << leftshift_for_tree_index(idx);
  3678     tchunkptr rst = 0;  /* The deepest untaken right subtree */
  3679     for (;;) {
  3680       tchunkptr rt;
  3681       size_t trem = chunksize(t) - nb;
  3682       if (trem < rsize) {
  3683         v = t;
  3684         if ((rsize = trem) == 0)
  3685           break;
  3686       }
  3687       rt = t->child[1];
  3688       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  3689       if (rt != 0 && rt != t)
  3690         rst = rt;
  3691       if (t == 0) {
  3692         t = rst; /* set t to least subtree holding sizes > nb */
  3693         break;
  3694       }
  3695       sizebits <<= 1;
  3696     }
  3697   }
  3698 
  3699   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
  3700     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
  3701     if (leftbits != 0) {
  3702       bindex_t i;
  3703       binmap_t leastbit = least_bit(leftbits);
  3704       compute_bit2idx(leastbit, i);
  3705       t = *treebin_at(m, i);
  3706     }
  3707   }
  3708 
  3709   while (t != 0) { /* find smallest of tree or subtree */
  3710     size_t trem = chunksize(t) - nb;
  3711     if (trem < rsize) {
  3712       rsize = trem;
  3713       v = t;
  3714     }
  3715     t = leftmost_child(t);
  3716   }
  3717 
  3718   /*  If dv is a better fit, return 0 so malloc will use it */
  3719   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
  3720     if (RTCHECK(ok_address(m, v))) { /* split */
  3721       mchunkptr r = chunk_plus_offset(v, nb);
  3722       assert(chunksize(v) == rsize + nb);
  3723       if (RTCHECK(ok_next(v, r))) {
  3724         unlink_large_chunk(m, v);
  3725         if (rsize < MIN_CHUNK_SIZE)
  3726           set_inuse_and_pinuse(m, v, (rsize + nb));
  3727         else {
  3728           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3729           set_size_and_pinuse_of_free_chunk(r, rsize);
  3730           insert_chunk(m, r, rsize);
  3731         }
  3732         return chunk2mem(v);
  3733       }
  3734     }
  3735     CORRUPTION_ERROR_ACTION(m);
  3736   }
  3737   return 0;
  3738 }
  3739 
  3740 /* allocate a small request from the best fitting chunk in a treebin */
  3741 static void* tmalloc_small(mstate m, size_t nb) {
  3742   tchunkptr t, v;
  3743   size_t rsize;
  3744   bindex_t i;
  3745   binmap_t leastbit = least_bit(m->treemap);
  3746   compute_bit2idx(leastbit, i);
  3747 
  3748   v = t = *treebin_at(m, i);
  3749   rsize = chunksize(t) - nb;
  3750 
  3751   while ((t = leftmost_child(t)) != 0) {
  3752     size_t trem = chunksize(t) - nb;
  3753     if (trem < rsize) {
  3754       rsize = trem;
  3755       v = t;
  3756     }
  3757   }
  3758 
  3759   if (RTCHECK(ok_address(m, v))) {
  3760     mchunkptr r = chunk_plus_offset(v, nb);
  3761     assert(chunksize(v) == rsize + nb);
  3762     if (RTCHECK(ok_next(v, r))) {
  3763       unlink_large_chunk(m, v);
  3764       if (rsize < MIN_CHUNK_SIZE)
  3765         set_inuse_and_pinuse(m, v, (rsize + nb));
  3766       else {
  3767         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3768         set_size_and_pinuse_of_free_chunk(r, rsize);
  3769         replace_dv(m, r, rsize);
  3770       }
  3771       return chunk2mem(v);
  3772     }
  3773   }
  3774 
  3775   CORRUPTION_ERROR_ACTION(m);
  3776   return 0;
  3777 }
  3778 
  3779 /* --------------------------- realloc support --------------------------- */
  3780 
  3781 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
  3782   if (bytes >= MAX_REQUEST) {
  3783     MALLOC_FAILURE_ACTION;
  3784     return 0;
  3785   }
  3786   if (!PREACTION(m)) {
  3787     mchunkptr oldp = mem2chunk(oldmem);
  3788     size_t oldsize = chunksize(oldp);
  3789     mchunkptr next = chunk_plus_offset(oldp, oldsize);
  3790     mchunkptr newp = 0;
  3791     void* extra = 0;
  3792 
  3793     /* Try to either shrink or extend into top. Else malloc-copy-free */
  3794 
  3795     if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
  3796                 ok_next(oldp, next) && ok_pinuse(next))) {
  3797       size_t nb = request2size(bytes);
  3798       if (is_mmapped(oldp))
  3799         newp = mmap_resize(m, oldp, nb);
  3800       else if (oldsize >= nb) { /* already big enough */
  3801         size_t rsize = oldsize - nb;
  3802         newp = oldp;
  3803         if (rsize >= MIN_CHUNK_SIZE) {
  3804           mchunkptr remainder = chunk_plus_offset(newp, nb);
  3805           set_inuse(m, newp, nb);
  3806           set_inuse(m, remainder, rsize);
  3807           extra = chunk2mem(remainder);
  3808         }
  3809       }
  3810       else if (next == m->top && oldsize + m->topsize > nb) {
  3811         /* Expand into top */
  3812         size_t newsize = oldsize + m->topsize;
  3813         size_t newtopsize = newsize - nb;
  3814         mchunkptr newtop = chunk_plus_offset(oldp, nb);
  3815         set_inuse(m, oldp, nb);
  3816         newtop->head = newtopsize |PINUSE_BIT;
  3817         m->top = newtop;
  3818         m->topsize = newtopsize;
  3819         newp = oldp;
  3820       }
  3821     }
  3822     else {
  3823       USAGE_ERROR_ACTION(m, oldmem);
  3824       POSTACTION(m);
  3825       return 0;
  3826     }
  3827 
  3828     POSTACTION(m);
  3829 
  3830     if (newp != 0) {
  3831       if (extra != 0) {
  3832         internal_free(m, extra);
  3833       }
  3834       check_inuse_chunk(m, newp);
  3835       return chunk2mem(newp);
  3836     }
  3837     else {
  3838       void* newmem = internal_malloc(m, bytes);
  3839       if (newmem != 0) {
  3840         size_t oc = oldsize - overhead_for(oldp);
  3841         SDL_memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
  3842         internal_free(m, oldmem);
  3843       }
  3844       return newmem;
  3845     }
  3846   }
  3847   return 0;
  3848 }
  3849 
  3850 /* --------------------------- memalign support -------------------------- */
  3851 
  3852 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  3853   if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
  3854     return internal_malloc(m, bytes);
  3855   if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
  3856     alignment = MIN_CHUNK_SIZE;
  3857   if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
  3858     size_t a = MALLOC_ALIGNMENT << 1;
  3859     while (a < alignment) a <<= 1;
  3860     alignment = a;
  3861   }
  3862   
  3863   if (bytes >= MAX_REQUEST - alignment) {
  3864     if (m != 0)  { /* Test isn't needed but avoids compiler warning */
  3865       MALLOC_FAILURE_ACTION;
  3866     }
  3867   }
  3868   else {
  3869     size_t nb = request2size(bytes);
  3870     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
  3871     char* mem = (char*)internal_malloc(m, req);
  3872     if (mem != 0) {
  3873       void* leader = 0;
  3874       void* trailer = 0;
  3875       mchunkptr p = mem2chunk(mem);
  3876 
  3877       if (PREACTION(m)) return 0;
  3878       if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
  3879         /*
  3880           Find an aligned spot inside chunk.  Since we need to give
  3881           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
  3882           the first calculation places us at a spot with less than
  3883           MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
  3884           We've allocated enough total room so that this is always
  3885           possible.
  3886         */
  3887         char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
  3888                                                        alignment -
  3889                                                        SIZE_T_ONE)) &
  3890                                              -alignment));
  3891         char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
  3892           br : br+alignment;
  3893         mchunkptr newp = (mchunkptr)pos;
  3894         size_t leadsize = pos - (char*)(p);
  3895         size_t newsize = chunksize(p) - leadsize;
  3896 
  3897         if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
  3898           newp->prev_foot = p->prev_foot + leadsize;
  3899           newp->head = (newsize|CINUSE_BIT);
  3900         }
  3901         else { /* Otherwise, give back leader, use the rest */
  3902           set_inuse(m, newp, newsize);
  3903           set_inuse(m, p, leadsize);
  3904           leader = chunk2mem(p);
  3905         }
  3906         p = newp;
  3907       }
  3908 
  3909       /* Give back spare room at the end */
  3910       if (!is_mmapped(p)) {
  3911         size_t size = chunksize(p);
  3912         if (size > nb + MIN_CHUNK_SIZE) {
  3913           size_t remainder_size = size - nb;
  3914           mchunkptr remainder = chunk_plus_offset(p, nb);
  3915           set_inuse(m, p, nb);
  3916           set_inuse(m, remainder, remainder_size);
  3917           trailer = chunk2mem(remainder);
  3918         }
  3919       }
  3920 
  3921       assert (chunksize(p) >= nb);
  3922       assert((((size_t)(chunk2mem(p))) % alignment) == 0);
  3923       check_inuse_chunk(m, p);
  3924       POSTACTION(m);
  3925       if (leader != 0) {
  3926         internal_free(m, leader);
  3927       }
  3928       if (trailer != 0) {
  3929         internal_free(m, trailer);
  3930       }
  3931       return chunk2mem(p);
  3932     }
  3933   }
  3934   return 0;
  3935 }
  3936 
  3937 /* ------------------------ comalloc/coalloc support --------------------- */
  3938 
  3939 static void** ialloc(mstate m,
  3940                      size_t n_elements,
  3941                      size_t* sizes,
  3942                      int opts,
  3943                      void* chunks[]) {
  3944   /*
  3945     This provides common support for independent_X routines, handling
  3946     all of the combinations that can result.
  3947 
  3948     The opts arg has:
  3949     bit 0 set if all elements are same size (using sizes[0])
  3950     bit 1 set if elements should be zeroed
  3951   */
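          /*
            For reference (as wired up later in this file): the
            independent_calloc entry points pass opts == 3 (all elements the
            same size, zeroed), while the independent_comalloc entry points
            pass opts == 0.
          */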
  3952 
  3953   size_t    element_size;   /* chunksize of each element, if all same */
  3954   size_t    contents_size;  /* total size of elements */
  3955   size_t    array_size;     /* request size of pointer array */
  3956   void*     mem;            /* malloced aggregate space */
  3957   mchunkptr p;              /* corresponding chunk */
  3958   size_t    remainder_size; /* remaining bytes while splitting */
  3959   void**    marray;         /* either "chunks" or malloced ptr array */
  3960   mchunkptr array_chunk;    /* chunk for malloced ptr array */
  3961   flag_t    was_enabled;    /* to disable mmap */
  3962   size_t    size;
  3963   size_t    i;
  3964 
  3965   /* compute array length, if needed */
  3966   if (chunks != 0) {
  3967     if (n_elements == 0)
  3968       return chunks; /* nothing to do */
  3969     marray = chunks;
  3970     array_size = 0;
  3971   }
  3972   else {
  3973     /* if empty req, must still return chunk representing empty array */
  3974     if (n_elements == 0)
  3975       return (void**)internal_malloc(m, 0);
  3976     marray = 0;
  3977     array_size = request2size(n_elements * (sizeof(void*)));
  3978   }
  3979 
  3980   /* compute total element size */
  3981   if (opts & 0x1) { /* all-same-size */
  3982     element_size = request2size(*sizes);
  3983     contents_size = n_elements * element_size;
  3984   }
  3985   else { /* add up all the sizes */
  3986     element_size = 0;
  3987     contents_size = 0;
  3988     for (i = 0; i != n_elements; ++i)
  3989       contents_size += request2size(sizes[i]);
  3990   }
  3991 
  3992   size = contents_size + array_size;
  3993 
  3994   /*
  3995      Allocate the aggregate chunk.  First disable direct-mmapping so
  3996      malloc won't use it, since we would not be able to later
  3997      free/realloc space internal to a segregated mmap region.
  3998   */
  3999   was_enabled = use_mmap(m);
  4000   disable_mmap(m);
  4001   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  4002   if (was_enabled)
  4003     enable_mmap(m);
  4004   if (mem == 0)
  4005     return 0;
  4006 
  4007   if (PREACTION(m)) return 0;
  4008   p = mem2chunk(mem);
  4009   remainder_size = chunksize(p);
  4010 
  4011   assert(!is_mmapped(p));
  4012 
  4013   if (opts & 0x2) {       /* optionally clear the elements */
  4014     memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  4015   }
  4016 
  4017   /* If not provided, allocate the pointer array as final part of chunk */
  4018   if (marray == 0) {
  4019     size_t  array_chunk_size;
  4020     array_chunk = chunk_plus_offset(p, contents_size);
  4021     array_chunk_size = remainder_size - contents_size;
  4022     marray = (void**) (chunk2mem(array_chunk));
  4023     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
  4024     remainder_size = contents_size;
  4025   }
  4026 
  4027   /* split out elements */
  4028   for (i = 0; ; ++i) {
  4029     marray[i] = chunk2mem(p);
  4030     if (i != n_elements-1) {
  4031       if (element_size != 0)
  4032         size = element_size;
  4033       else
  4034         size = request2size(sizes[i]);
  4035       remainder_size -= size;
  4036       set_size_and_pinuse_of_inuse_chunk(m, p, size);
  4037       p = chunk_plus_offset(p, size);
  4038     }
  4039     else { /* the final element absorbs any overallocation slop */
  4040       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
  4041       break;
  4042     }
  4043   }
  4044 
  4045 #if DEBUG
  4046   if (marray != chunks) {
  4047     /* final element must have exactly exhausted chunk */
  4048     if (element_size != 0) {
  4049       assert(remainder_size == element_size);
  4050     }
  4051     else {
  4052       assert(remainder_size == request2size(sizes[i]));
  4053     }
  4054     check_inuse_chunk(m, mem2chunk(marray));
  4055   }
  4056   for (i = 0; i != n_elements; ++i)
  4057     check_inuse_chunk(m, mem2chunk(marray[i]));
  4058 
  4059 #endif /* DEBUG */
  4060 
  4061   POSTACTION(m);
  4062   return marray;
  4063 }
  4064 
  4065 
  4066 /* -------------------------- public routines ---------------------------- */
  4067 
  4068 #if !ONLY_MSPACES
  4069 
  4070 void* dlmalloc(size_t bytes) {
  4071   /*
  4072      Basic algorithm:
  4073      If a small request (< 256 bytes minus per-chunk overhead):
  4074        1. If one exists, use a remainderless chunk in associated smallbin.
  4075           (Remainderless means that there are too few excess bytes to
  4076           represent as a chunk.)
  4077        2. If it is big enough, use the dv chunk, which is normally the
  4078           chunk adjacent to the one used for the most recent small request.
  4079        3. If one exists, split the smallest available chunk in a bin,
  4080           saving remainder in dv.
  4081        4. If it is big enough, use the top chunk.
  4082        5. If available, get memory from system and use it
  4083      Otherwise, for a large request:
  4084        1. Find the smallest available binned chunk that fits, and use it
  4085           if it is better fitting than dv chunk, splitting if necessary.
  4086        2. If better fitting than any binned chunk, use the dv chunk.
  4087        3. If it is big enough, use the top chunk.
  4088        4. If request size >= mmap threshold, try to directly mmap this chunk.
  4089        5. If available, get memory from system and use it
  4090 
   4091      The ugly gotos here ensure that postaction occurs along all paths.
  4092   */
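          /*
             A concrete small-request example (hypothetical configuration:
             4-byte size_t, 8-byte alignment, FOOTERS disabled): a request
             for 20 bytes pads to a 24-byte chunk (20 bytes + 4 bytes of
             chunk overhead, rounded up to a multiple of 8), which maps to
             smallbin index 3 (24 >> 3).
          */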
  4093 
  4094   if (!PREACTION(gm)) {
  4095     void* mem;
  4096     size_t nb;
  4097     if (bytes <= MAX_SMALL_REQUEST) {
  4098       bindex_t idx;
  4099       binmap_t smallbits;
  4100       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4101       idx = small_index(nb);
  4102       smallbits = gm->smallmap >> idx;
  4103 
  4104       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4105         mchunkptr b, p;
  4106         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4107         b = smallbin_at(gm, idx);
  4108         p = b->fd;
  4109         assert(chunksize(p) == small_index2size(idx));
  4110         unlink_first_small_chunk(gm, b, p, idx);
  4111         set_inuse_and_pinuse(gm, p, small_index2size(idx));
  4112         mem = chunk2mem(p);
  4113         check_malloced_chunk(gm, mem, nb);
  4114         goto postaction;
  4115       }
  4116 
  4117       else if (nb > gm->dvsize) {
  4118         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4119           mchunkptr b, p, r;
  4120           size_t rsize;
  4121           bindex_t i;
  4122           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4123           binmap_t leastbit = least_bit(leftbits);
  4124           compute_bit2idx(leastbit, i);
  4125           b = smallbin_at(gm, i);
  4126           p = b->fd;
  4127           assert(chunksize(p) == small_index2size(i));
  4128           unlink_first_small_chunk(gm, b, p, i);
  4129           rsize = small_index2size(i) - nb;
  4130           /* Fit here cannot be remainderless if 4byte sizes */
  4131           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4132             set_inuse_and_pinuse(gm, p, small_index2size(i));
  4133           else {
  4134             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4135             r = chunk_plus_offset(p, nb);
  4136             set_size_and_pinuse_of_free_chunk(r, rsize);
  4137             replace_dv(gm, r, rsize);
  4138           }
  4139           mem = chunk2mem(p);
  4140           check_malloced_chunk(gm, mem, nb);
  4141           goto postaction;
  4142         }
  4143 
  4144         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
  4145           check_malloced_chunk(gm, mem, nb);
  4146           goto postaction;
  4147         }
  4148       }
  4149     }
  4150     else if (bytes >= MAX_REQUEST)
  4151       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  4152     else {
  4153       nb = pad_request(bytes);
  4154       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  4155         check_malloced_chunk(gm, mem, nb);
  4156         goto postaction;
  4157       }
  4158     }
  4159 
  4160     if (nb <= gm->dvsize) {
  4161       size_t rsize = gm->dvsize - nb;
  4162       mchunkptr p = gm->dv;
  4163       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  4164         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
  4165         gm->dvsize = rsize;
  4166         set_size_and_pinuse_of_free_chunk(r, rsize);
  4167         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4168       }
  4169       else { /* exhaust dv */
  4170         size_t dvs = gm->dvsize;
  4171         gm->dvsize = 0;
  4172         gm->dv = 0;
  4173         set_inuse_and_pinuse(gm, p, dvs);
  4174       }
  4175       mem = chunk2mem(p);
  4176       check_malloced_chunk(gm, mem, nb);
  4177       goto postaction;
  4178     }
  4179 
  4180     else if (nb < gm->topsize) { /* Split top */
  4181       size_t rsize = gm->topsize -= nb;
  4182       mchunkptr p = gm->top;
  4183       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  4184       r->head = rsize | PINUSE_BIT;
  4185       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4186       mem = chunk2mem(p);
  4187       check_top_chunk(gm, gm->top);
  4188       check_malloced_chunk(gm, mem, nb);
  4189       goto postaction;
  4190     }
  4191 
  4192     mem = sys_alloc(gm, nb);
  4193 
  4194   postaction:
  4195     POSTACTION(gm);
  4196     return mem;
  4197   }
  4198 
  4199   return 0;
  4200 }
  4201 
  4202 void dlfree(void* mem) {
  4203   /*
   4204      Consolidate freed chunks with preceding or succeeding bordering
  4205      free chunks, if they exist, and then place in a bin.  Intermixed
  4206      with special cases for top, dv, mmapped chunks, and usage errors.
  4207   */
  4208 
  4209   if (mem != 0) {
  4210     mchunkptr p  = mem2chunk(mem);
  4211 #if FOOTERS
  4212     mstate fm = get_mstate_for(p);
  4213     if (!ok_magic(fm)) {
  4214       USAGE_ERROR_ACTION(fm, p);
  4215       return;
  4216     }
  4217 #else /* FOOTERS */
  4218 #define fm gm
  4219 #endif /* FOOTERS */
  4220     if (!PREACTION(fm)) {
  4221       check_inuse_chunk(fm, p);
  4222       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4223         size_t psize = chunksize(p);
  4224         mchunkptr next = chunk_plus_offset(p, psize);
  4225         if (!pinuse(p)) {
  4226           size_t prevsize = p->prev_foot;
  4227           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4228             prevsize &= ~IS_MMAPPED_BIT;
  4229             psize += prevsize + MMAP_FOOT_PAD;
  4230             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4231               fm->footprint -= psize;
  4232             goto postaction;
  4233           }
  4234           else {
  4235             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4236             psize += prevsize;
  4237             p = prev;
  4238             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4239               if (p != fm->dv) {
  4240                 unlink_chunk(fm, p, prevsize);
  4241               }
  4242               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4243                 fm->dvsize = psize;
  4244                 set_free_with_pinuse(p, psize, next);
  4245                 goto postaction;
  4246               }
  4247             }
  4248             else
  4249               goto erroraction;
  4250           }
  4251         }
  4252 
  4253         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4254           if (!cinuse(next)) {  /* consolidate forward */
  4255             if (next == fm->top) {
  4256               size_t tsize = fm->topsize += psize;
  4257               fm->top = p;
  4258               p->head = tsize | PINUSE_BIT;
  4259               if (p == fm->dv) {
  4260                 fm->dv = 0;
  4261                 fm->dvsize = 0;
  4262               }
  4263               if (should_trim(fm, tsize))
  4264                 sys_trim(fm, 0);
  4265               goto postaction;
  4266             }
  4267             else if (next == fm->dv) {
  4268               size_t dsize = fm->dvsize += psize;
  4269               fm->dv = p;
  4270               set_size_and_pinuse_of_free_chunk(p, dsize);
  4271               goto postaction;
  4272             }
  4273             else {
  4274               size_t nsize = chunksize(next);
  4275               psize += nsize;
  4276               unlink_chunk(fm, next, nsize);
  4277               set_size_and_pinuse_of_free_chunk(p, psize);
  4278               if (p == fm->dv) {
  4279                 fm->dvsize = psize;
  4280                 goto postaction;
  4281               }
  4282             }
  4283           }
  4284           else
  4285             set_free_with_pinuse(p, psize, next);
  4286           insert_chunk(fm, p, psize);
  4287           check_free_chunk(fm, p);
  4288           goto postaction;
  4289         }
  4290       }
  4291     erroraction:
  4292       USAGE_ERROR_ACTION(fm, p);
  4293     postaction:
  4294       POSTACTION(fm);
  4295     }
  4296   }
  4297 #if !FOOTERS
  4298 #undef fm
  4299 #endif /* FOOTERS */
  4300 }
  4301 
  4302 void* dlcalloc(size_t n_elements, size_t elem_size) {
  4303   void* mem;
  4304   size_t req = 0;
  4305   if (n_elements != 0) {
  4306     req = n_elements * elem_size;
  4307     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4308         (req / n_elements != elem_size))
  4309       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4310   }
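          /* Note on the guard above: when both n_elements and elem_size fit
             in 16 bits, their product cannot wrap even a 32-bit size_t
             (at most 0xffff * 0xffff == 0xfffe0001), so the division check
             can be skipped. */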
  4311   mem = dlmalloc(req);
  4312   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4313     memset(mem, 0, req);
  4314   return mem;
  4315 }
  4316 
  4317 void* dlrealloc(void* oldmem, size_t bytes) {
  4318   if (oldmem == 0)
  4319     return dlmalloc(bytes);
  4320 #ifdef REALLOC_ZERO_BYTES_FREES
  4321   if (bytes == 0) {
  4322     dlfree(oldmem);
  4323     return 0;
  4324   }
  4325 #endif /* REALLOC_ZERO_BYTES_FREES */
  4326   else {
  4327 #if ! FOOTERS
  4328     mstate m = gm;
  4329 #else /* FOOTERS */
  4330     mstate m = get_mstate_for(mem2chunk(oldmem));
  4331     if (!ok_magic(m)) {
  4332       USAGE_ERROR_ACTION(m, oldmem);
  4333       return 0;
  4334     }
  4335 #endif /* FOOTERS */
  4336     return internal_realloc(m, oldmem, bytes);
  4337   }
  4338 }
  4339 
  4340 void* dlmemalign(size_t alignment, size_t bytes) {
  4341   return internal_memalign(gm, alignment, bytes);
  4342 }
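
        /*
          A minimal usage sketch for dlmemalign (hypothetical sizes): the
          returned pointer, when non-null, is aligned to the requested power
          of two and may be released with dlfree like any other allocation.

              void* p = dlmemalign(64, 1000);
              if (p != 0) {
                // ((size_t)p % 64) == 0 holds here
                dlfree(p);
              }
        */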
  4343 
  4344 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
  4345                                  void* chunks[]) {
  4346   size_t sz = elem_size; /* serves as 1-element array */
  4347   return ialloc(gm, n_elements, &sz, 3, chunks);
  4348 }
  4349 
  4350 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
  4351                                    void* chunks[]) {
  4352   return ialloc(gm, n_elements, sizes, 0, chunks);
  4353 }
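
        /*
          A usage sketch for dlindependent_comalloc (hypothetical element
          sizes).  As set up by ialloc above, every element -- and the
          returned pointer array itself when the chunks argument is null --
          is an ordinary in-use chunk, so each may be passed to dlfree
          independently.

              size_t sizes[3] = { 16, 64, 256 };
              void** elems = dlindependent_comalloc(3, sizes, 0);
              if (elems != 0) {
                void* small_buf  = elems[0];   // at least 16 usable bytes
                void* medium_buf = elems[1];   // at least 64 usable bytes
                void* large_buf  = elems[2];   // at least 256 usable bytes
                dlfree(small_buf);
                dlfree(medium_buf);
                dlfree(large_buf);
                dlfree(elems);                 // the pointer array is a chunk too
              }
        */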
  4354 
  4355 void* dlvalloc(size_t bytes) {
  4356   size_t pagesz;
  4357   init_mparams();
  4358   pagesz = mparams.page_size;
  4359   return dlmemalign(pagesz, bytes);
  4360 }
  4361 
  4362 void* dlpvalloc(size_t bytes) {
  4363   size_t pagesz;
  4364   init_mparams();
  4365   pagesz = mparams.page_size;
  4366   return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
  4367 }
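
        /*
          Rounding example for dlpvalloc (hypothetical page size of 4096):
          a request for 5000 bytes becomes (5000 + 4095) & ~4095 == 8192,
          i.e. the request is rounded up to a whole number of pages before
          being page-aligned.
        */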
  4368 
  4369 int dlmalloc_trim(size_t pad) {
  4370   int result = 0;
  4371   if (!PREACTION(gm)) {
  4372     result = sys_trim(gm, pad);
  4373     POSTACTION(gm);
  4374   }
  4375   return result;
  4376 }
  4377 
  4378 size_t dlmalloc_footprint(void) {
  4379   return gm->footprint;
  4380 }
  4381 
  4382 size_t dlmalloc_max_footprint(void) {
  4383   return gm->max_footprint;
  4384 }
  4385 
  4386 #if !NO_MALLINFO
  4387 struct mallinfo dlmallinfo(void) {
  4388   return internal_mallinfo(gm);
  4389 }
  4390 #endif /* NO_MALLINFO */
  4391 
  4392 void dlmalloc_stats() {
  4393   internal_malloc_stats(gm);
  4394 }
  4395 
  4396 size_t dlmalloc_usable_size(void* mem) {
  4397   if (mem != 0) {
  4398     mchunkptr p = mem2chunk(mem);
  4399     if (cinuse(p))
  4400       return chunksize(p) - overhead_for(p);
  4401   }
  4402   return 0;
  4403 }
  4404 
  4405 int dlmallopt(int param_number, int value) {
  4406   return change_mparam(param_number, value);
  4407 }
  4408 
  4409 #endif /* !ONLY_MSPACES */
  4410 
  4411 /* ----------------------------- user mspaces ---------------------------- */
  4412 
  4413 #if MSPACES
  4414 
  4415 static mstate init_user_mstate(char* tbase, size_t tsize) {
  4416   size_t msize = pad_request(sizeof(struct malloc_state));
  4417   mchunkptr mn;
  4418   mchunkptr msp = align_as_chunk(tbase);
  4419   mstate m = (mstate)(chunk2mem(msp));
  4420   memset(m, 0, msize);
  4421   INITIAL_LOCK(&m->mutex);
  4422   msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
  4423   m->seg.base = m->least_addr = tbase;
  4424   m->seg.size = m->footprint = m->max_footprint = tsize;
  4425   m->magic = mparams.magic;
  4426   m->mflags = mparams.default_mflags;
  4427   disable_contiguous(m);
  4428   init_bins(m);
  4429   mn = next_chunk(mem2chunk(m));
  4430   init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  4431   check_top_chunk(m, m->top);
  4432   return m;
  4433 }
  4434 
  4435 mspace create_mspace(size_t capacity, int locked) {
  4436   mstate m = 0;
  4437   size_t msize = pad_request(sizeof(struct malloc_state));
  4438   init_mparams(); /* Ensure pagesize etc initialized */
  4439 
  4440   if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4441     size_t rs = ((capacity == 0)? mparams.granularity :
  4442                  (capacity + TOP_FOOT_SIZE + msize));
  4443     size_t tsize = granularity_align(rs);
  4444     char* tbase = (char*)(CALL_MMAP(tsize));
  4445     if (tbase != CMFAIL) {
  4446       m = init_user_mstate(tbase, tsize);
  4447       m->seg.sflags = IS_MMAPPED_BIT;
  4448       set_lock(m, locked);
  4449     }
  4450   }
  4451   return (mspace)m;
  4452 }
  4453 
  4454 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  4455   mstate m = 0;
  4456   size_t msize = pad_request(sizeof(struct malloc_state));
  4457   init_mparams(); /* Ensure pagesize etc initialized */
  4458 
  4459   if (capacity > msize + TOP_FOOT_SIZE &&
  4460       capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4461     m = init_user_mstate((char*)base, capacity);
  4462     m->seg.sflags = EXTERN_BIT;
  4463     set_lock(m, locked);
  4464   }
  4465   return (mspace)m;
  4466 }
  4467 
  4468 size_t destroy_mspace(mspace msp) {
  4469   size_t freed = 0;
  4470   mstate ms = (mstate)msp;
  4471   if (ok_magic(ms)) {
  4472     msegmentptr sp = &ms->seg;
  4473     while (sp != 0) {
  4474       char* base = sp->base;
  4475       size_t size = sp->size;
  4476       flag_t flag = sp->sflags;
  4477       sp = sp->next;
  4478       if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
  4479           CALL_MUNMAP(base, size) == 0)
  4480         freed += size;
  4481     }
  4482   }
  4483   else {
  4484     USAGE_ERROR_ACTION(ms,ms);
  4485   }
  4486   return freed;
  4487 }
  4488 
  4489 /*
  4490   mspace versions of routines are near-clones of the global
  4491   versions. This is not so nice but better than the alternatives.
  4492 */
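
        /*
          A minimal usage sketch for mspaces (hypothetical sizes): a private
          space is created, used through the mspace_* entry points, and torn
          down with destroy_mspace, which unmaps the segments the space
          obtained for itself.

              mspace msp = create_mspace(0, 0);  // default capacity, no locking
              if (msp != 0) {
                void* p = mspace_malloc(msp, 128);
                if (p != 0)
                  mspace_free(msp, p);
                destroy_mspace(msp);
              }
        */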
  4493 
  4494 
  4495 void* mspace_malloc(mspace msp, size_t bytes) {
  4496   mstate ms = (mstate)msp;
  4497   if (!ok_magic(ms)) {
  4498     USAGE_ERROR_ACTION(ms,ms);
  4499     return 0;
  4500   }
  4501   if (!PREACTION(ms)) {
  4502     void* mem;
  4503     size_t nb;
  4504     if (bytes <= MAX_SMALL_REQUEST) {
  4505       bindex_t idx;
  4506       binmap_t smallbits;
  4507       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4508       idx = small_index(nb);
  4509       smallbits = ms->smallmap >> idx;
  4510 
  4511       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4512         mchunkptr b, p;
  4513         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4514         b = smallbin_at(ms, idx);
  4515         p = b->fd;
  4516         assert(chunksize(p) == small_index2size(idx));
  4517         unlink_first_small_chunk(ms, b, p, idx);
  4518         set_inuse_and_pinuse(ms, p, small_index2size(idx));
  4519         mem = chunk2mem(p);
  4520         check_malloced_chunk(ms, mem, nb);
  4521         goto postaction;
  4522       }
  4523 
  4524       else if (nb > ms->dvsize) {
  4525         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4526           mchunkptr b, p, r;
  4527           size_t rsize;
  4528           bindex_t i;
  4529           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4530           binmap_t leastbit = least_bit(leftbits);
  4531           compute_bit2idx(leastbit, i);
  4532           b = smallbin_at(ms, i);
  4533           p = b->fd;
  4534           assert(chunksize(p) == small_index2size(i));
  4535           unlink_first_small_chunk(ms, b, p, i);
  4536           rsize = small_index2size(i) - nb;
  4537           /* Fit here cannot be remainderless if 4byte sizes */
  4538           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4539             set_inuse_and_pinuse(ms, p, small_index2size(i));
  4540           else {
  4541             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4542             r = chunk_plus_offset(p, nb);
  4543             set_size_and_pinuse_of_free_chunk(r, rsize);
  4544             replace_dv(ms, r, rsize);
  4545           }
  4546           mem = chunk2mem(p);
  4547           check_malloced_chunk(ms, mem, nb);
  4548           goto postaction;
  4549         }
  4550 
  4551         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
  4552           check_malloced_chunk(ms, mem, nb);
  4553           goto postaction;
  4554         }
  4555       }
  4556     }
  4557     else if (bytes >= MAX_REQUEST)
  4558       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  4559     else {
  4560       nb = pad_request(bytes);
  4561       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
  4562         check_malloced_chunk(ms, mem, nb);
  4563         goto postaction;
  4564       }
  4565     }
  4566 
  4567     if (nb <= ms->dvsize) {
  4568       size_t rsize = ms->dvsize - nb;
  4569       mchunkptr p = ms->dv;
  4570       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  4571         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
  4572         ms->dvsize = rsize;
  4573         set_size_and_pinuse_of_free_chunk(r, rsize);
  4574         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4575       }
  4576       else { /* exhaust dv */
  4577         size_t dvs = ms->dvsize;
  4578         ms->dvsize = 0;
  4579         ms->dv = 0;
  4580         set_inuse_and_pinuse(ms, p, dvs);
  4581       }
  4582       mem = chunk2mem(p);
  4583       check_malloced_chunk(ms, mem, nb);
  4584       goto postaction;
  4585     }
  4586 
  4587     else if (nb < ms->topsize) { /* Split top */
  4588       size_t rsize = ms->topsize -= nb;
  4589       mchunkptr p = ms->top;
  4590       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
  4591       r->head = rsize | PINUSE_BIT;
  4592       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4593       mem = chunk2mem(p);
  4594       check_top_chunk(ms, ms->top);
  4595       check_malloced_chunk(ms, mem, nb);
  4596       goto postaction;
  4597     }
  4598 
  4599     mem = sys_alloc(ms, nb);
  4600 
  4601   postaction:
  4602     POSTACTION(ms);
  4603     return mem;
  4604   }
  4605 
  4606   return 0;
  4607 }
  4608 
  4609 void mspace_free(mspace msp, void* mem) {
  4610   if (mem != 0) {
  4611     mchunkptr p  = mem2chunk(mem);
  4612 #if FOOTERS
  4613     mstate fm = get_mstate_for(p);
  4614 #else /* FOOTERS */
  4615     mstate fm = (mstate)msp;
  4616 #endif /* FOOTERS */
  4617     if (!ok_magic(fm)) {
  4618       USAGE_ERROR_ACTION(fm, p);
  4619       return;
  4620     }
  4621     if (!PREACTION(fm)) {
  4622       check_inuse_chunk(fm, p);
  4623       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4624         size_t psize = chunksize(p);
  4625         mchunkptr next = chunk_plus_offset(p, psize);
  4626         if (!pinuse(p)) {
  4627           size_t prevsize = p->prev_foot;
  4628           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4629             prevsize &= ~IS_MMAPPED_BIT;
  4630             psize += prevsize + MMAP_FOOT_PAD;
  4631             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4632               fm->footprint -= psize;
  4633             goto postaction;
  4634           }
  4635           else {
  4636             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4637             psize += prevsize;
  4638             p = prev;
  4639             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4640               if (p != fm->dv) {
  4641                 unlink_chunk(fm, p, prevsize);
  4642               }
  4643               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4644                 fm->dvsize = psize;
  4645                 set_free_with_pinuse(p, psize, next);
  4646                 goto postaction;
  4647               }
  4648             }
  4649             else
  4650               goto erroraction;
  4651           }
  4652         }
  4653 
  4654         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4655           if (!cinuse(next)) {  /* consolidate forward */
  4656             if (next == fm->top) {
  4657               size_t tsize = fm->topsize += psize;
  4658               fm->top = p;
  4659               p->head = tsize | PINUSE_BIT;
  4660               if (p == fm->dv) {
  4661                 fm->dv = 0;
  4662                 fm->dvsize = 0;
  4663               }
  4664               if (should_trim(fm, tsize))
  4665                 sys_trim(fm, 0);
  4666               goto postaction;
  4667             }
  4668             else if (next == fm->dv) {
  4669               size_t dsize = fm->dvsize += psize;
  4670               fm->dv = p;
  4671               set_size_and_pinuse_of_free_chunk(p, dsize);
  4672               goto postaction;
  4673             }
  4674             else {
  4675               size_t nsize = chunksize(next);
  4676               psize += nsize;
  4677               unlink_chunk(fm, next, nsize);
  4678               set_size_and_pinuse_of_free_chunk(p, psize);
  4679               if (p == fm->dv) {
  4680                 fm->dvsize = psize;
  4681                 goto postaction;
  4682               }
  4683             }
  4684           }
  4685           else
  4686             set_free_with_pinuse(p, psize, next);
  4687           insert_chunk(fm, p, psize);
  4688           check_free_chunk(fm, p);
  4689           goto postaction;
  4690         }
  4691       }
  4692     erroraction:
  4693       USAGE_ERROR_ACTION(fm, p);
  4694     postaction:
  4695       POSTACTION(fm);
  4696     }
  4697   }
  4698 }
  4699 
  4700 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  4701   void* mem;
  4702   size_t req = 0;
  4703   mstate ms = (mstate)msp;
  4704   if (!ok_magic(ms)) {
  4705     USAGE_ERROR_ACTION(ms,ms);
  4706     return 0;
  4707   }
  4708   if (n_elements != 0) {
  4709     req = n_elements * elem_size;
  4710     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4711         (req / n_elements != elem_size))
  4712       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4713   }
  4714   mem = internal_malloc(ms, req);
  4715   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4716     memset(mem, 0, req);
  4717   return mem;
  4718 }
  4719 
  4720 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  4721   if (oldmem == 0)
  4722     return mspace_malloc(msp, bytes);
  4723 #ifdef REALLOC_ZERO_BYTES_FREES
  4724   if (bytes == 0) {
  4725     mspace_free(msp, oldmem);
  4726     return 0;
  4727   }
  4728 #endif /* REALLOC_ZERO_BYTES_FREES */
  4729   else {
  4730 #if FOOTERS
  4731     mchunkptr p  = mem2chunk(oldmem);
  4732     mstate ms = get_mstate_for(p);
  4733 #else /* FOOTERS */
  4734     mstate ms = (mstate)msp;
  4735 #endif /* FOOTERS */
  4736     if (!ok_magic(ms)) {
  4737       USAGE_ERROR_ACTION(ms,ms);
  4738       return 0;
  4739     }
  4740     return internal_realloc(ms, oldmem, bytes);
  4741   }
  4742 }
  4743 
  4744 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  4745   mstate ms = (mstate)msp;
  4746   if (!ok_magic(ms)) {
  4747     USAGE_ERROR_ACTION(ms,ms);
  4748     return 0;
  4749   }
  4750   return internal_memalign(ms, alignment, bytes);
  4751 }
  4752 
  4753 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  4754                                  size_t elem_size, void* chunks[]) {
  4755   size_t sz = elem_size; /* serves as 1-element array */
  4756   mstate ms = (mstate)msp;
  4757   if (!ok_magic(ms)) {
  4758     USAGE_ERROR_ACTION(ms,ms);
  4759     return 0;
  4760   }
  4761   return ialloc(ms, n_elements, &sz, 3, chunks);
  4762 }
  4763 
  4764 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  4765                                    size_t sizes[], void* chunks[]) {
  4766   mstate ms = (mstate)msp;
  4767   if (!ok_magic(ms)) {
  4768     USAGE_ERROR_ACTION(ms,ms);
  4769     return 0;
  4770   }
  4771   return ialloc(ms, n_elements, sizes, 0, chunks);
  4772 }
  4773 
  4774 int mspace_trim(mspace msp, size_t pad) {
  4775   int result = 0;
  4776   mstate ms = (mstate)msp;
  4777   if (ok_magic(ms)) {
  4778     if (!PREACTION(ms)) {
  4779       result = sys_trim(ms, pad);
  4780       POSTACTION(ms);
  4781     }
  4782   }
  4783   else {
  4784     USAGE_ERROR_ACTION(ms,ms);
  4785   }
  4786   return result;
  4787 }
  4788 
  4789 void mspace_malloc_stats(mspace msp) {
  4790   mstate ms = (mstate)msp;
  4791   if (ok_magic(ms)) {
  4792     internal_malloc_stats(ms);
  4793   }
  4794   else {
  4795     USAGE_ERROR_ACTION(ms,ms);
  4796   }
  4797 }
  4798 
  4799 size_t mspace_footprint(mspace msp) {
   4800   size_t result = 0;
   4801   mstate ms = (mstate)msp;
   4802   if (ok_magic(ms)) {
   4803     result = ms->footprint;
   4804   }
   4805   else { USAGE_ERROR_ACTION(ms,ms); }
  4806   return result;
  4807 }
  4808 
  4809 
  4810 size_t mspace_max_footprint(mspace msp) {
   4811   size_t result = 0;
   4812   mstate ms = (mstate)msp;
   4813   if (ok_magic(ms)) {
   4814     result = ms->max_footprint;
   4815   }
   4816   else { USAGE_ERROR_ACTION(ms,ms); }
  4817   return result;
  4818 }
  4819 
  4820 
  4821 #if !NO_MALLINFO
  4822 struct mallinfo mspace_mallinfo(mspace msp) {
  4823   mstate ms = (mstate)msp;
  4824   if (!ok_magic(ms)) {
  4825     USAGE_ERROR_ACTION(ms,ms);
  4826   }
  4827   return internal_mallinfo(ms);
  4828 }
  4829 #endif /* NO_MALLINFO */
  4830 
  4831 int mspace_mallopt(int param_number, int value) {
  4832   return change_mparam(param_number, value);
  4833 }
  4834 
  4835 #endif /* MSPACES */
  4836 
  4837 /* -------------------- Alternative MORECORE functions ------------------- */
  4838 
  4839 /*
  4840   Guidelines for creating a custom version of MORECORE:
  4841 
  4842   * For best performance, MORECORE should allocate in multiples of pagesize.
  4843   * MORECORE may allocate more memory than requested. (Or even less,
  4844       but this will usually result in a malloc failure.)
  4845   * MORECORE must not allocate memory when given argument zero, but
  4846       instead return one past the end address of memory from previous
  4847       nonzero call.
  4848   * For best performance, consecutive calls to MORECORE with positive
  4849       arguments should return increasing addresses, indicating that
  4850       space has been contiguously extended.
  4851   * Even though consecutive calls to MORECORE need not return contiguous
  4852       addresses, it must be OK for malloc'ed chunks to span multiple
  4853       regions in those cases where they do happen to be contiguous.
  4854   * MORECORE need not handle negative arguments -- it may instead
  4855       just return MFAIL when given negative arguments.
  4856       Negative arguments are always multiples of pagesize. MORECORE
  4857       must not misinterpret negative args as large positive unsigned
  4858       args. You can suppress all such calls from even occurring by defining
   4859       MORECORE_CANNOT_TRIM.
  4860 
  4861   As an example alternative MORECORE, here is a custom allocator
   4862   kindly contributed for the classic (pre-OS X) Mac OS.  It uses virtually but not
  4863   necessarily physically contiguous non-paged memory (locked in,
  4864   present and won't get swapped out).  You can use it by uncommenting
  4865   this section, adding some #includes, and setting up the appropriate
  4866   defines above:
  4867 
  4868       #define MORECORE osMoreCore
  4869 
  4870   There is also a shutdown routine that should somehow be called for
  4871   cleanup upon program exit.
  4872 
  4873   #define MAX_POOL_ENTRIES 100
  4874   #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
  4875   static int next_os_pool;
  4876   void *our_os_pools[MAX_POOL_ENTRIES];
  4877 
  4878   void *osMoreCore(int size)
  4879   {
  4880     void *ptr = 0;
  4881     static void *sbrk_top = 0;
  4882 
  4883     if (size > 0)
  4884     {
  4885       if (size < MINIMUM_MORECORE_SIZE)
  4886          size = MINIMUM_MORECORE_SIZE;
  4887       if (CurrentExecutionLevel() == kTaskLevel)
  4888          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
  4889       if (ptr == 0)
  4890       {
  4891         return (void *) MFAIL;
  4892       }
  4893       // save ptrs so they can be freed during cleanup
  4894       our_os_pools[next_os_pool] = ptr;
  4895       next_os_pool++;
  4896       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
  4897       sbrk_top = (char *) ptr + size;
  4898       return ptr;
  4899     }
  4900     else if (size < 0)
  4901     {
  4902       // we don't currently support shrink behavior
  4903       return (void *) MFAIL;
  4904     }
  4905     else
  4906     {
  4907       return sbrk_top;
  4908     }
  4909   }
  4910 
  4911   // cleanup any allocated memory pools
  4912   // called as last thing before shutting down driver
  4913 
  4914   void osCleanupMem(void)
  4915   {
  4916     void **ptr;
  4917 
  4918     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
  4919       if (*ptr)
  4920       {
  4921          PoolDeallocate(*ptr);
  4922          *ptr = 0;
  4923       }
  4924   }
  4925 
  4926 */
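
        /*
          A second, minimal sketch of an alternative MORECORE (names here are
          hypothetical and not part of this file's configuration): a bump
          allocator over a fixed static arena.  It follows the guidelines
          above -- a zero argument returns the end of previously returned
          memory, exhaustion returns MFAIL, and shrinking is declined -- so
          MORECORE_CANNOT_TRIM would normally be defined alongside it.

          #define MORECORE arena_morecore
          #define MORECORE_CANNOT_TRIM

          #define ARENA_SIZE (1024 * 1024)
          static char arena[ARENA_SIZE];
          static size_t arena_used = 0;

          void* arena_morecore(int size)
          {
            if (size == 0)
              return arena + arena_used;      // end of previously returned memory
            if (size < 0)
              return (void*) MFAIL;           // shrinking not supported
            if ((size_t) size > ARENA_SIZE - arena_used)
              return (void*) MFAIL;           // arena exhausted
            {
              void* ptr = arena + arena_used;
              arena_used += (size_t) size;
              return ptr;
            }
          }
        */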
  4927 
  4928 
  4929 /* -----------------------------------------------------------------------
  4930 History:
  4931     V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
  4932       * Add max_footprint functions
  4933       * Ensure all appropriate literals are size_t
  4934       * Fix conditional compilation problem for some #define settings
  4935       * Avoid concatenating segments with the one provided
  4936         in create_mspace_with_base
  4937       * Rename some variables to avoid compiler shadowing warnings
  4938       * Use explicit lock initialization.
  4939       * Better handling of sbrk interference.
  4940       * Simplify and fix segment insertion, trimming and mspace_destroy
  4941       * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
  4942       * Thanks especially to Dennis Flanagan for help on these.
  4943 
  4944     V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
  4945       * Fix memalign brace error.
  4946 
  4947     V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
  4948       * Fix improper #endif nesting in C++
  4949       * Add explicit casts needed for C++
  4950 
  4951     V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
  4952       * Use trees for large bins
  4953       * Support mspaces
  4954       * Use segments to unify sbrk-based and mmap-based system allocation,
  4955         removing need for emulation on most platforms without sbrk.
  4956       * Default safety checks
  4957       * Optional footer checks. Thanks to William Robertson for the idea.
  4958       * Internal code refactoring
  4959       * Incorporate suggestions and platform-specific changes.
  4960         Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
  4961         Aaron Bachmann,  Emery Berger, and others.
  4962       * Speed up non-fastbin processing enough to remove fastbins.
  4963       * Remove useless cfree() to avoid conflicts with other apps.
  4964       * Remove internal memcpy, memset. Compilers handle builtins better.
  4965       * Remove some options that no one ever used and rename others.
  4966 
  4967     V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
  4968       * Fix malloc_state bitmap array misdeclaration
  4969 
  4970     V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
  4971       * Allow tuning of FIRST_SORTED_BIN_SIZE
  4972       * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
  4973       * Better detection and support for non-contiguousness of MORECORE.
  4974         Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
  4975       * Bypass most of malloc if no frees. Thanks To Emery Berger.
   4976       * Fix freeing of old top non-contiguous chunk in sysmalloc.
  4977       * Raised default trim and map thresholds to 256K.
  4978       * Fix mmap-related #defines. Thanks to Lubos Lunak.
  4979       * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
  4980       * Branch-free bin calculation
  4981       * Default trim and mmap thresholds now 256K.
  4982 
  4983     V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
  4984       * Introduce independent_comalloc and independent_calloc.
  4985         Thanks to Michael Pachos for motivation and help.
  4986       * Make optional .h file available
  4987       * Allow > 2GB requests on 32bit systems.
  4988       * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
  4989         Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
  4990         and Anonymous.
  4991       * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
  4992         helping test this.)
  4993       * memalign: check alignment arg
  4994       * realloc: don't try to shift chunks backwards, since this
  4995         leads to  more fragmentation in some programs and doesn't
  4996         seem to help in any others.
  4997       * Collect all cases in malloc requiring system memory into sysmalloc
  4998       * Use mmap as backup to sbrk
  4999       * Place all internal state in malloc_state
  5000       * Introduce fastbins (although similar to 2.5.1)
  5001       * Many minor tunings and cosmetic improvements
  5002       * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
  5003       * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
  5004         Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
  5005       * Include errno.h to support default failure action.
  5006 
  5007     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
  5008       * return null for negative arguments
  5009       * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
  5010          * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
  5011           (e.g. WIN32 platforms)
  5012          * Cleanup header file inclusion for WIN32 platforms
  5013          * Cleanup code to avoid Microsoft Visual C++ compiler complaints
  5014          * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
  5015            memory allocation routines
  5016          * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
  5017          * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
  5018            usage of 'assert' in non-WIN32 code
  5019          * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
  5020            avoid infinite loop
  5021       * Always call 'fREe()' rather than 'free()'
  5022 
  5023     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
  5024       * Fixed ordering problem with boundary-stamping
  5025 
  5026     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
  5027       * Added pvalloc, as recommended by H.J. Liu
  5028       * Added 64bit pointer support mainly from Wolfram Gloger
  5029       * Added anonymously donated WIN32 sbrk emulation
  5030       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
  5031       * malloc_extend_top: fix mask error that caused wastage after
  5032         foreign sbrks
  5033       * Add linux mremap support code from HJ Liu
  5034 
  5035     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
  5036       * Integrated most documentation with the code.
  5037       * Add support for mmap, with help from
  5038         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5039       * Use last_remainder in more cases.
  5040       * Pack bins using idea from  colin@nyx10.cs.du.edu
   5041       * Use ordered bins instead of best-fit threshold
  5042       * Eliminate block-local decls to simplify tracing and debugging.
  5043       * Support another case of realloc via move into top
   5044       * Fix error occurring when initial sbrk_base not word-aligned.
  5045       * Rely on page size for units instead of SBRK_UNIT to
  5046         avoid surprises about sbrk alignment conventions.
  5047       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
  5048         (raymond@es.ele.tue.nl) for the suggestion.
  5049       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
  5050       * More precautions for cases where other routines call sbrk,
  5051         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5052       * Added macros etc., allowing use in linux libc from
  5053         H.J. Lu (hjl@gnu.ai.mit.edu)
  5054       * Inverted this history list
  5055 
  5056     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
  5057       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
  5058       * Removed all preallocation code since under current scheme
  5059         the work required to undo bad preallocations exceeds
  5060         the work saved in good cases for most test programs.
  5061       * No longer use return list or unconsolidated bins since
  5062         no scheme using them consistently outperforms those that don't
  5063         given above changes.
  5064       * Use best fit for very large chunks to prevent some worst-cases.
  5065       * Added some support for debugging
  5066 
  5067     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
  5068       * Removed footers when chunks are in use. Thanks to
  5069         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
  5070 
  5071     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
  5072       * Added malloc_trim, with help from Wolfram Gloger
  5073         (wmglo@Dent.MED.Uni-Muenchen.DE).
  5074 
  5075     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
  5076 
  5077     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
  5078       * realloc: try to expand in both directions
  5079       * malloc: swap order of clean-bin strategy;
  5080       * realloc: only conditionally expand backwards
  5081       * Try not to scavenge used bins
  5082       * Use bin counts as a guide to preallocation
  5083       * Occasionally bin return list chunks in first scan
  5084       * Add a few optimizations from colin@nyx10.cs.du.edu
  5085 
  5086     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
  5087       * faster bin computation & slightly different binning
  5088       * merged all consolidations to one part of malloc proper
  5089          (eliminating old malloc_find_space & malloc_clean_bin)
  5090       * Scan 2 returns chunks (not just 1)
  5091       * Propagate failure in realloc if malloc returns 0
  5092       * Add stuff to allow compilation on non-ANSI compilers
  5093           from kpv@research.att.com
  5094 
  5095     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
  5096       * removed potential for odd address access in prev_chunk
  5097       * removed dependency on getpagesize.h
  5098       * misc cosmetics and a bit more internal documentation
  5099       * anticosmetics: mangled names in macros to evade debugger strangeness
  5100       * tested on sparc, hp-700, dec-mips, rs6000
  5101           with gcc & native cc (hp, dec only) allowing
  5102           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
  5103 
  5104     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
  5105       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
  5106          structure of old version,  but most details differ.)
  5107  
  5108 */
  5109 
  5110 #endif /* !HAVE_MALLOC */