src/stdlib/SDL_malloc.c
     1 /*
     2   Simple DirectMedia Layer
     3   Copyright (C) 1997-2017 Sam Lantinga <slouken@libsdl.org>
     4 
     5   This software is provided 'as-is', without any express or implied
     6   warranty.  In no event will the authors be held liable for any damages
     7   arising from the use of this software.
     8 
     9   Permission is granted to anyone to use this software for any purpose,
    10   including commercial applications, and to alter it and redistribute it
    11   freely, subject to the following restrictions:
    12 
    13   1. The origin of this software must not be misrepresented; you must not
    14      claim that you wrote the original software. If you use this software
    15      in a product, an acknowledgment in the product documentation would be
    16      appreciated but is not required.
    17   2. Altered source versions must be plainly marked as such, and must not be
    18      misrepresented as being the original software.
    19   3. This notice may not be removed or altered from any source distribution.
    20 */
    21 
    22 #if defined(__clang_analyzer__) && !defined(SDL_DISABLE_ANALYZE_MACROS)
    23 #define SDL_DISABLE_ANALYZE_MACROS 1
    24 #endif
    25 
    26 #include "../SDL_internal.h"
    27 
    28 /* This file contains portable memory management functions for SDL */
    29 
    30 #include "SDL_stdinc.h"
    31 
    32 #if defined(HAVE_MALLOC)
    33 
    34 void *SDL_malloc(size_t size)
    35 {
    36     return malloc(size);
    37 }
    38 
    39 void *SDL_calloc(size_t nmemb, size_t size)
    40 {
    41     return calloc(nmemb, size);
    42 }
    43 
    44 void *SDL_realloc(void *ptr, size_t size)
    45 {
    46     return realloc(ptr, size);
    47 }
    48 
    49 void SDL_free(void *ptr)
    50 {
    51     free(ptr);
    52 }
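        /*
          For example (an illustrative usage sketch): when HAVE_MALLOC is set,
          these wrappers simply forward to the C runtime allocator, so they can
          be used anywhere malloc/free would be:

            char *buf = (char *) SDL_malloc(64);
            if (buf != NULL) {
                SDL_memset(buf, 0, 64);
                SDL_free(buf);
            }
        */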
    53 
    54 #else  /* the rest of this is a LOT of tapdancing to implement malloc. :) */
    55 
    56 #define LACKS_SYS_TYPES_H
    57 #define LACKS_STDIO_H
    58 #define LACKS_STRINGS_H
    59 #define LACKS_STRING_H
    60 #define LACKS_STDLIB_H
    61 #define ABORT
    62 #define USE_LOCKS 1
    63 
    64 /*
    65   This is a version (aka dlmalloc) of malloc/free/realloc written by
    66   Doug Lea and released to the public domain, as explained at
    67   http://creativecommons.org/licenses/publicdomain.  Send questions,
    68   comments, complaints, performance data, etc to dl@cs.oswego.edu
    69 
    70 * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
    71 
    72    Note: There may be an updated version of this malloc obtainable at
    73            ftp://gee.cs.oswego.edu/pub/misc/malloc.c
    74          Check before installing!
    75 
    76 * Quickstart
    77 
    78   This library is all in one file to simplify the most common usage:
    79   ftp it, compile it (-O3), and link it into another program. All of
    80   the compile-time options default to reasonable values for use on
    81   most platforms.  You might later want to step through various
    82   compile-time and dynamic tuning options.
    83 
    84   For convenience, an include file for code using this malloc is at:
    85      ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
    86   You don't really need this .h file unless you call functions not
    87   defined in your system include files.  The .h file contains only the
    88   excerpts from this file needed for using this malloc on ANSI C/C++
    89   systems, so long as you haven't changed compile-time options about
    90   naming and tuning parameters.  If you do, then you can create your
    91   own malloc.h that does include all settings by cutting at the point
    92   indicated below. Note that you may already by default be using a C
    93   library containing a malloc that is based on some version of this
    94   malloc (for example in linux). You might still want to use the one
    95   in this file to customize settings or to avoid overheads associated
    96   with library versions.
    97 
    98 * Vital statistics:
    99 
   100   Supported pointer/size_t representation:       4 or 8 bytes
   101        size_t MUST be an unsigned type of the same width as
   102        pointers. (If you are using an ancient system that declares
   103        size_t as a signed type, or need it to be a different width
   104        than pointers, you can use a previous release of this malloc
   105        (e.g. 2.7.2) supporting these.)
   106 
   107   Alignment:                                     8 bytes (default)
   108        This suffices for nearly all current machines and C compilers.
   109        However, you can define MALLOC_ALIGNMENT to be wider than this
   110        if necessary (up to 128bytes), at the expense of using more space.
   111 
   112   Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
   113                                           8 or 16 bytes (if 8byte sizes)
   114        Each malloced chunk has a hidden word of overhead holding size
   115        and status information, and additional cross-check word
   116        if FOOTERS is defined.
   117 
   118   Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
   119                           8-byte ptrs:  32 bytes    (including overhead)
   120 
   121        Even a request for zero bytes (i.e., malloc(0)) returns a
   122        pointer to something of the minimum allocatable size.
    123        The maximum overhead wastage (i.e., the number of extra bytes
    124        allocated beyond what was requested in malloc) is less than or equal
   125        to the minimum size, except for requests >= mmap_threshold that
   126        are serviced via mmap(), where the worst case wastage is about
   127        32 bytes plus the remainder from a system page (the minimal
   128        mmap unit); typically 4096 or 8192 bytes.
   129 
   130   Security: static-safe; optionally more or less
   131        The "security" of malloc refers to the ability of malicious
   132        code to accentuate the effects of errors (for example, freeing
   133        space that is not currently malloc'ed or overwriting past the
   134        ends of chunks) in code that calls malloc.  This malloc
   135        guarantees not to modify any memory locations below the base of
   136        heap, i.e., static variables, even in the presence of usage
   137        errors.  The routines additionally detect most improper frees
   138        and reallocs.  All this holds as long as the static bookkeeping
   139        for malloc itself is not corrupted by some other means.  This
   140        is only one aspect of security -- these checks do not, and
   141        cannot, detect all possible programming errors.
   142 
   143        If FOOTERS is defined nonzero, then each allocated chunk
   144        carries an additional check word to verify that it was malloced
   145        from its space.  These check words are the same within each
   146        execution of a program using malloc, but differ across
   147        executions, so externally crafted fake chunks cannot be
   148        freed. This improves security by rejecting frees/reallocs that
   149        could corrupt heap memory, in addition to the checks preventing
   150        writes to statics that are always on.  This may further improve
   151        security at the expense of time and space overhead.  (Note that
   152        FOOTERS may also be worth using with MSPACES.)
   153 
   154        By default detected errors cause the program to abort (calling
   155        "abort()"). You can override this to instead proceed past
   156        errors by defining PROCEED_ON_ERROR.  In this case, a bad free
   157        has no effect, and a malloc that encounters a bad address
   158        caused by user overwrites will ignore the bad address by
   159        dropping pointers and indices to all known memory. This may
   160        be appropriate for programs that should continue if at all
   161        possible in the face of programming errors, although they may
   162        run out of memory because dropped memory is never reclaimed.
   163 
   164        If you don't like either of these options, you can define
   165        CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
    166   else. And if you are sure that your program using malloc has
   167        no errors or vulnerabilities, you can define INSECURE to 1,
   168        which might (or might not) provide a small performance improvement.
   169 
   170   Thread-safety: NOT thread-safe unless USE_LOCKS defined
   171        When USE_LOCKS is defined, each public call to malloc, free,
   172        etc is surrounded with either a pthread mutex or a win32
   173        spinlock (depending on WIN32). This is not especially fast, and
   174        can be a major bottleneck.  It is designed only to provide
   175        minimal protection in concurrent environments, and to provide a
   176        basis for extensions.  If you are using malloc in a concurrent
   177        program, consider instead using ptmalloc, which is derived from
   178        a version of this malloc. (See http://www.malloc.de).
   179 
   180   System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
   181        This malloc can use unix sbrk or any emulation (invoked using
   182        the CALL_MORECORE macro) and/or mmap/munmap or any emulation
   183        (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
   184        memory.  On most unix systems, it tends to work best if both
   185        MORECORE and MMAP are enabled.  On Win32, it uses emulations
   186        based on VirtualAlloc. It also uses common C library functions
   187        like memset.
   188 
   189   Compliance: I believe it is compliant with the Single Unix Specification
   190        (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
   191        others as well.
   192 
   193 * Overview of algorithms
   194 
   195   This is not the fastest, most space-conserving, most portable, or
   196   most tunable malloc ever written. However it is among the fastest
   197   while also being among the most space-conserving, portable and
   198   tunable.  Consistent balance across these factors results in a good
   199   general-purpose allocator for malloc-intensive programs.
   200 
   201   In most ways, this malloc is a best-fit allocator. Generally, it
   202   chooses the best-fitting existing chunk for a request, with ties
   203   broken in approximately least-recently-used order. (This strategy
   204   normally maintains low fragmentation.) However, for requests less
   205   than 256bytes, it deviates from best-fit when there is not an
   206   exactly fitting available chunk by preferring to use space adjacent
   207   to that used for the previous small request, as well as by breaking
   208   ties in approximately most-recently-used order. (These enhance
   209   locality of series of small allocations.)  And for very large requests
   210   (>= 256Kb by default), it relies on system memory mapping
   211   facilities, if supported.  (This helps avoid carrying around and
   212   possibly fragmenting memory used only for large chunks.)
   213 
   214   All operations (except malloc_stats and mallinfo) have execution
   215   times that are bounded by a constant factor of the number of bits in
   216   a size_t, not counting any clearing in calloc or copying in realloc,
   217   or actions surrounding MORECORE and MMAP that have times
   218   proportional to the number of non-contiguous regions returned by
   219   system allocation routines, which is often just 1.
   220 
   221   The implementation is not very modular and seriously overuses
   222   macros. Perhaps someday all C compilers will do as good a job
   223   inlining modular code as can now be done by brute-force expansion,
   224   but now, enough of them seem not to.
   225 
   226   Some compilers issue a lot of warnings about code that is
   227   dead/unreachable only on some platforms, and also about intentional
   228   uses of negation on unsigned types. All known cases of each can be
   229   ignored.
   230 
   231   For a longer but out of date high-level description, see
   232      http://gee.cs.oswego.edu/dl/html/malloc.html
   233 
   234 * MSPACES
   235   If MSPACES is defined, then in addition to malloc, free, etc.,
   236   this file also defines mspace_malloc, mspace_free, etc. These
   237   are versions of malloc routines that take an "mspace" argument
   238   obtained using create_mspace, to control all internal bookkeeping.
   239   If ONLY_MSPACES is defined, only these versions are compiled.
   240   So if you would like to use this allocator for only some allocations,
   241   and your system malloc for others, you can compile with
   242   ONLY_MSPACES and then do something like...
   243     static mspace mymspace = create_mspace(0,0); // for example
   244     #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
   245 
   246   (Note: If you only need one instance of an mspace, you can instead
   247   use "USE_DL_PREFIX" to relabel the global malloc.)
   248 
   249   You can similarly create thread-local allocators by storing
   250   mspaces as thread-locals. For example:
   251     static __thread mspace tlms = 0;
   252     void*  tlmalloc(size_t bytes) {
   253       if (tlms == 0) tlms = create_mspace(0, 0);
   254       return mspace_malloc(tlms, bytes);
   255     }
   256     void  tlfree(void* mem) { mspace_free(tlms, mem); }
   257 
   258   Unless FOOTERS is defined, each mspace is completely independent.
   259   You cannot allocate from one and free to another (although
   260   conformance is only weakly checked, so usage errors are not always
   261   caught). If FOOTERS is defined, then each chunk carries around a tag
   262   indicating its originating mspace, and frees are directed to their
   263   originating spaces.
   264 
   265  -------------------------  Compile-time options ---------------------------
   266 
   267 Be careful in setting #define values for numerical constants of type
   268 size_t. On some systems, literal values are not automatically extended
    269 to size_t precision unless they are explicitly cast.
   270 
   271 WIN32                    default: defined if _WIN32 defined
   272   Defining WIN32 sets up defaults for MS environment and compilers.
   273   Otherwise defaults are for unix.
   274 
   275 MALLOC_ALIGNMENT         default: (size_t)8
   276   Controls the minimum alignment for malloc'ed chunks.  It must be a
   277   power of two and at least 8, even on machines for which smaller
   278   alignments would suffice. It may be defined as larger than this
   279   though. Note however that code and data structures are optimized for
   280   the case of 8-byte alignment.
   281 
   282 MSPACES                  default: 0 (false)
   283   If true, compile in support for independent allocation spaces.
   284   This is only supported if HAVE_MMAP is true.
   285 
   286 ONLY_MSPACES             default: 0 (false)
   287   If true, only compile in mspace versions, not regular versions.
   288 
   289 USE_LOCKS                default: 0 (false)
   290   Causes each call to each public routine to be surrounded with
   291   pthread or WIN32 mutex lock/unlock. (If set true, this can be
   292   overridden on a per-mspace basis for mspace versions.)
   293 
   294 FOOTERS                  default: 0
   295   If true, provide extra checking and dispatching by placing
   296   information in the footers of allocated chunks. This adds
   297   space and time overhead.
   298 
   299 INSECURE                 default: 0
   300   If true, omit checks for usage errors and heap space overwrites.
   301 
   302 USE_DL_PREFIX            default: NOT defined
   303   Causes compiler to prefix all public routines with the string 'dl'.
   304   This can be useful when you only want to use this malloc in one part
   305   of a program, using your regular system malloc elsewhere.
   306 
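          For example (an illustrative sketch; assumes this file is compiled
          with USE_DL_PREFIX defined), the prefixed routines can coexist with
          the system allocator:

            void* p = dlmalloc(100);   // served by this malloc
            void* q = malloc(100);     // served by the system malloc
            dlfree(p);
            free(q);
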
   307 ABORT                    default: defined as abort()
   308   Defines how to abort on failed checks.  On most systems, a failed
   309   check cannot die with an "assert" or even print an informative
   310   message, because the underlying print routines in turn call malloc,
   311   which will fail again.  Generally, the best policy is to simply call
   312   abort(). It's not very useful to do more than this because many
   313   errors due to overwriting will show up as address faults (null, odd
   314   addresses etc) rather than malloc-triggered checks, so will also
   315   abort.  Also, most compilers know that abort() does not return, so
   316   can better optimize code conditionally calling it.
   317 
   318 PROCEED_ON_ERROR           default: defined as 0 (false)
    319   Controls whether detected bad addresses cause them to be bypassed
   320   rather than aborting. If set, detected bad arguments to free and
   321   realloc are ignored. And all bookkeeping information is zeroed out
   322   upon a detected overwrite of freed heap space, thus losing the
   323   ability to ever return it from malloc again, but enabling the
   324   application to proceed. If PROCEED_ON_ERROR is defined, the
   325   static variable malloc_corruption_error_count is compiled in
   326   and can be examined to see if errors have occurred. This option
   327   generates slower code than the default abort policy.
   328 
   329 DEBUG                    default: NOT defined
   330   The DEBUG setting is mainly intended for people trying to modify
   331   this code or diagnose problems when porting to new platforms.
   332   However, it may also be able to better isolate user errors than just
   333   using runtime checks.  The assertions in the check routines spell
   334   out in more detail the assumptions and invariants underlying the
   335   algorithms.  The checking is fairly extensive, and will slow down
   336   execution noticeably. Calling malloc_stats or mallinfo with DEBUG
   337   set will attempt to check every non-mmapped allocated and free chunk
   338   in the course of computing the summaries.
   339 
   340 ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
   341   Debugging assertion failures can be nearly impossible if your
   342   version of the assert macro causes malloc to be called, which will
   343   lead to a cascade of further failures, blowing the runtime stack.
    344   ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
   345   which will usually make debugging easier.
   346 
   347 MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
    348   The action to take before "return 0" when malloc fails because
    349   no memory is available.
   350 
   351 HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
   352   True if this system supports sbrk or an emulation of it.
   353 
   354 MORECORE                  default: sbrk
   355   The name of the sbrk-style system routine to call to obtain more
   356   memory.  See below for guidance on writing custom MORECORE
   357   functions. The type of the argument to sbrk/MORECORE varies across
   358   systems.  It cannot be size_t, because it supports negative
   359   arguments, so it is normally the signed type of the same width as
   360   size_t (sometimes declared as "intptr_t").  It doesn't much matter
   361   though. Internally, we only call it with arguments less than half
   362   the max value of a size_t, which should work across all reasonable
   363   possibilities, although sometimes generating compiler warnings.  See
   364   near the end of this file for guidelines for creating a custom
   365   version of MORECORE.
   366 
   367 MORECORE_CONTIGUOUS       default: 1 (true)
   368   If true, take advantage of fact that consecutive calls to MORECORE
   369   with positive arguments always return contiguous increasing
   370   addresses.  This is true of unix sbrk. It does not hurt too much to
   371   set it true anyway, since malloc copes with non-contiguities.
    372   Setting it false when MORECORE is definitely non-contiguous, though,
    373   saves the time and possibly wasted space it would take to discover this.
   374 
   375 MORECORE_CANNOT_TRIM      default: NOT defined
   376   True if MORECORE cannot release space back to the system when given
   377   negative arguments. This is generally necessary only if you are
   378   using a hand-crafted MORECORE function that cannot handle negative
   379   arguments.
   380 
   381 HAVE_MMAP                 default: 1 (true)
   382   True if this system supports mmap or an emulation of it.  If so, and
   383   HAVE_MORECORE is not true, MMAP is used for all system
   384   allocation. If set and HAVE_MORECORE is true as well, MMAP is
   385   primarily used to directly allocate very large blocks. It is also
   386   used as a backup strategy in cases where MORECORE fails to provide
   387   space from system. Note: A single call to MUNMAP is assumed to be
    388   able to unmap memory that may have been allocated using multiple calls
   389   to MMAP, so long as they are adjacent.
   390 
   391 HAVE_MREMAP               default: 1 on linux, else 0
   392   If true realloc() uses mremap() to re-allocate large blocks and
   393   extend or shrink allocation spaces.
   394 
   395 MMAP_CLEARS               default: 1 on unix
   396   True if mmap clears memory so calloc doesn't need to. This is true
   397   for standard unix mmap using /dev/zero.
   398 
   399 USE_BUILTIN_FFS            default: 0 (i.e., not used)
   400   Causes malloc to use the builtin ffs() function to compute indices.
   401   Some compilers may recognize and intrinsify ffs to be faster than the
   402   supplied C version. Also, the case of x86 using gcc is special-cased
   403   to an asm instruction, so is already as fast as it can be, and so
   404   this setting has no effect. (On most x86s, the asm version is only
   405   slightly faster than the C version.)
   406 
   407 malloc_getpagesize         default: derive from system includes, or 4096.
   408   The system page size. To the extent possible, this malloc manages
   409   memory from the system in page-size units.  This may be (and
   410   usually is) a function rather than a constant. This is ignored
    411   if WIN32, where page size is determined using GetSystemInfo during
   412   initialization.
   413 
   414 USE_DEV_RANDOM             default: 0 (i.e., not used)
   415   Causes malloc to use /dev/random to initialize secure magic seed for
   416   stamping footers. Otherwise, the current time is used.
   417 
   418 NO_MALLINFO                default: 0
   419   If defined, don't compile "mallinfo". This can be a simple way
   420   of dealing with mismatches between system declarations and
   421   those in this file.
   422 
   423 MALLINFO_FIELD_TYPE        default: size_t
   424   The type of the fields in the mallinfo struct. This was originally
   425   defined as "int" in SVID etc, but is more usefully defined as
    426   size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
   427 
   428 REALLOC_ZERO_BYTES_FREES    default: not defined
   429   This should be set if a call to realloc with zero bytes should
   430   be the same as a call to free. Some people think it should. Otherwise,
    431   since this malloc returns a unique pointer for malloc(0),
    432   realloc(p, 0) does as well.
   433 
   434 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
   435 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
   436 LACKS_STDLIB_H                default: NOT defined unless on WIN32
   437   Define these if your system does not have these header files.
   438   You might need to manually insert some of the declarations they provide.
   439 
   440 DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
   441                                 system_info.dwAllocationGranularity in WIN32,
   442                                 otherwise 64K.
   443       Also settable using mallopt(M_GRANULARITY, x)
   444   The unit for allocating and deallocating memory from the system.  On
   445   most systems with contiguous MORECORE, there is no reason to
   446   make this more than a page. However, systems with MMAP tend to
   447   either require or encourage larger granularities.  You can increase
    448   this value to prevent system allocation functions from being called so
   449   often, especially if they are slow.  The value must be at least one
   450   page and must be a power of two.  Setting to 0 causes initialization
   451   to either page size or win32 region size.  (Note: In previous
   452   versions of malloc, the equivalent of this option was called
   453   "TOP_PAD")
   454 
   455 DEFAULT_TRIM_THRESHOLD    default: 2MB
   456       Also settable using mallopt(M_TRIM_THRESHOLD, x)
   457   The maximum amount of unused top-most memory to keep before
   458   releasing via malloc_trim in free().  Automatic trimming is mainly
   459   useful in long-lived programs using contiguous MORECORE.  Because
   460   trimming via sbrk can be slow on some systems, and can sometimes be
   461   wasteful (in cases where programs immediately afterward allocate
   462   more large chunks) the value should be high enough so that your
   463   overall system performance would improve by releasing this much
   464   memory.  As a rough guide, you might set to a value close to the
   465   average size of a process (program) running on your system.
   466   Releasing this much memory would allow such a process to run in
   467   memory.  Generally, it is worth tuning trim thresholds when a
   468   program undergoes phases where several large chunks are allocated
   469   and released in ways that can reuse each other's storage, perhaps
   470   mixed with phases where there are no such chunks at all. The trim
   471   value must be greater than page size to have any useful effect.  To
   472   disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
   473   some people use of mallocing a huge space and then freeing it at
   474   program startup, in an attempt to reserve system memory, doesn't
   475   have the intended effect under automatic trimming, since that memory
   476   will immediately be returned to the system.
   477 
   478 DEFAULT_MMAP_THRESHOLD       default: 256K
   479       Also settable using mallopt(M_MMAP_THRESHOLD, x)
   480   The request size threshold for using MMAP to directly service a
   481   request. Requests of at least this size that cannot be allocated
   482   using already-existing space will be serviced via mmap.  (If enough
   483   normal freed space already exists it is used instead.)  Using mmap
   484   segregates relatively large chunks of memory so that they can be
   485   individually obtained and released from the host system. A request
   486   serviced through mmap is never reused by any other request (at least
   487   not directly; the system may just so happen to remap successive
   488   requests to the same locations).  Segregating space in this way has
   489   the benefits that: Mmapped space can always be individually released
   490   back to the system, which helps keep the system level memory demands
   491   of a long-lived program low.  Also, mapped memory doesn't become
   492   `locked' between other chunks, as can happen with normally allocated
   493   chunks, which means that even trimming via malloc_trim would not
   494   release them.  However, it has the disadvantage that the space
   495   cannot be reclaimed, consolidated, and then used to service later
   496   requests, as happens with normal chunks.  The advantages of mmap
   497   nearly always outweigh disadvantages for "large" chunks, but the
   498   value of "large" may vary across systems.  The default is an
   499   empirically derived value that works well in most systems. You can
   500   disable mmap by setting to MAX_SIZE_T.
   501 
   502 */
   503 
   504 #ifndef WIN32
   505 #ifdef _WIN32
   506 #define WIN32 1
   507 #endif /* _WIN32 */
   508 #endif /* WIN32 */
   509 #ifdef WIN32
   510 #define WIN32_LEAN_AND_MEAN
   511 #include <windows.h>
   512 #define HAVE_MMAP 1
   513 #define HAVE_MORECORE 0
   514 #define LACKS_UNISTD_H
   515 #define LACKS_SYS_PARAM_H
   516 #define LACKS_SYS_MMAN_H
   517 #define LACKS_STRING_H
   518 #define LACKS_STRINGS_H
   519 #define LACKS_SYS_TYPES_H
   520 #define LACKS_ERRNO_H
   521 #define LACKS_FCNTL_H
   522 #define MALLOC_FAILURE_ACTION
   523 #define MMAP_CLEARS 0           /* WINCE and some others apparently don't clear */
   524 #endif /* WIN32 */
   525 
   526 #if defined(DARWIN) || defined(_DARWIN)
   527 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
   528 #ifndef HAVE_MORECORE
   529 #define HAVE_MORECORE 0
   530 #define HAVE_MMAP 1
   531 #endif /* HAVE_MORECORE */
   532 #endif /* DARWIN */
   533 
   534 #ifndef LACKS_SYS_TYPES_H
   535 #include <sys/types.h>          /* For size_t */
   536 #endif /* LACKS_SYS_TYPES_H */
   537 
   538 /* The maximum possible size_t value has all bits set */
   539 #define MAX_SIZE_T           (~(size_t)0)
   540 
   541 #ifndef ONLY_MSPACES
   542 #define ONLY_MSPACES 0
   543 #endif /* ONLY_MSPACES */
   544 #ifndef MSPACES
   545 #if ONLY_MSPACES
   546 #define MSPACES 1
   547 #else /* ONLY_MSPACES */
   548 #define MSPACES 0
   549 #endif /* ONLY_MSPACES */
   550 #endif /* MSPACES */
   551 #ifndef MALLOC_ALIGNMENT
   552 #define MALLOC_ALIGNMENT ((size_t)8U)
   553 #endif /* MALLOC_ALIGNMENT */
   554 #ifndef FOOTERS
   555 #define FOOTERS 0
   556 #endif /* FOOTERS */
   557 #ifndef ABORT
   558 #define ABORT  abort()
   559 #endif /* ABORT */
   560 #ifndef ABORT_ON_ASSERT_FAILURE
   561 #define ABORT_ON_ASSERT_FAILURE 1
   562 #endif /* ABORT_ON_ASSERT_FAILURE */
   563 #ifndef PROCEED_ON_ERROR
   564 #define PROCEED_ON_ERROR 0
   565 #endif /* PROCEED_ON_ERROR */
   566 #ifndef USE_LOCKS
   567 #define USE_LOCKS 0
   568 #endif /* USE_LOCKS */
   569 #ifndef INSECURE
   570 #define INSECURE 0
   571 #endif /* INSECURE */
   572 #ifndef HAVE_MMAP
   573 #define HAVE_MMAP 1
   574 #endif /* HAVE_MMAP */
   575 #ifndef MMAP_CLEARS
   576 #define MMAP_CLEARS 1
   577 #endif /* MMAP_CLEARS */
   578 #ifndef HAVE_MREMAP
   579 #ifdef linux
   580 #define HAVE_MREMAP 1
   581 #else /* linux */
   582 #define HAVE_MREMAP 0
   583 #endif /* linux */
   584 #endif /* HAVE_MREMAP */
   585 #ifndef MALLOC_FAILURE_ACTION
   586 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
   587 #endif /* MALLOC_FAILURE_ACTION */
   588 #ifndef HAVE_MORECORE
   589 #if ONLY_MSPACES
   590 #define HAVE_MORECORE 0
   591 #else /* ONLY_MSPACES */
   592 #define HAVE_MORECORE 1
   593 #endif /* ONLY_MSPACES */
   594 #endif /* HAVE_MORECORE */
   595 #if !HAVE_MORECORE
   596 #define MORECORE_CONTIGUOUS 0
   597 #else /* !HAVE_MORECORE */
   598 #ifndef MORECORE
   599 #define MORECORE sbrk
   600 #endif /* MORECORE */
   601 #ifndef MORECORE_CONTIGUOUS
   602 #define MORECORE_CONTIGUOUS 1
   603 #endif /* MORECORE_CONTIGUOUS */
   604 #endif /* HAVE_MORECORE */
   605 #ifndef DEFAULT_GRANULARITY
   606 #if MORECORE_CONTIGUOUS
   607 #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
   608 #else /* MORECORE_CONTIGUOUS */
   609 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
   610 #endif /* MORECORE_CONTIGUOUS */
   611 #endif /* DEFAULT_GRANULARITY */
   612 #ifndef DEFAULT_TRIM_THRESHOLD
   613 #ifndef MORECORE_CANNOT_TRIM
   614 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
   615 #else /* MORECORE_CANNOT_TRIM */
   616 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
   617 #endif /* MORECORE_CANNOT_TRIM */
   618 #endif /* DEFAULT_TRIM_THRESHOLD */
   619 #ifndef DEFAULT_MMAP_THRESHOLD
   620 #if HAVE_MMAP
   621 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
   622 #else /* HAVE_MMAP */
   623 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
   624 #endif /* HAVE_MMAP */
   625 #endif /* DEFAULT_MMAP_THRESHOLD */
   626 #ifndef USE_BUILTIN_FFS
   627 #define USE_BUILTIN_FFS 0
   628 #endif /* USE_BUILTIN_FFS */
   629 #ifndef USE_DEV_RANDOM
   630 #define USE_DEV_RANDOM 0
   631 #endif /* USE_DEV_RANDOM */
   632 #ifndef NO_MALLINFO
   633 #define NO_MALLINFO 0
   634 #endif /* NO_MALLINFO */
   635 #ifndef MALLINFO_FIELD_TYPE
   636 #define MALLINFO_FIELD_TYPE size_t
   637 #endif /* MALLINFO_FIELD_TYPE */
   638 
   639 #define memset  SDL_memset
   640 #define memcpy  SDL_memcpy
   641 #define malloc  SDL_malloc
   642 #define calloc  SDL_calloc
   643 #define realloc SDL_realloc
   644 #define free    SDL_free
   645 
   646 /*
   647   mallopt tuning options.  SVID/XPG defines four standard parameter
   648   numbers for mallopt, normally defined in malloc.h.  None of these
   649   are used in this malloc, so setting them has no effect. But this
   650   malloc does support the following options.
   651 */
   652 
   653 #define M_TRIM_THRESHOLD     (-1)
   654 #define M_GRANULARITY        (-2)
   655 #define M_MMAP_THRESHOLD     (-3)
   656 
   657 /* ------------------------ Mallinfo declarations ------------------------ */
   658 
   659 #if !NO_MALLINFO
   660 /*
   661   This version of malloc supports the standard SVID/XPG mallinfo
   662   routine that returns a struct containing usage properties and
   663   statistics. It should work on any system that has a
   664   /usr/include/malloc.h defining struct mallinfo.  The main
   665   declaration needed is the mallinfo struct that is returned (by-copy)
    666   by mallinfo().  The mallinfo struct contains a bunch of fields that
    667   are not even meaningful in this version of malloc.  These fields
    668   are instead filled by mallinfo() with other numbers that might be of
   669   interest.
   670 
   671   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
   672   /usr/include/malloc.h file that includes a declaration of struct
   673   mallinfo.  If so, it is included; else a compliant version is
   674   declared below.  These must be precisely the same for mallinfo() to
   675   work.  The original SVID version of this struct, defined on most
   676   systems with mallinfo, declares all fields as ints. But some others
   677   define as unsigned long. If your system defines the fields using a
   678   type of different width than listed here, you MUST #include your
   679   system version and #define HAVE_USR_INCLUDE_MALLOC_H.
   680 */
   681 
   682 /* #define HAVE_USR_INCLUDE_MALLOC_H */
   683 
   684 #ifdef HAVE_USR_INCLUDE_MALLOC_H
   685 #include "/usr/include/malloc.h"
   686 #else /* HAVE_USR_INCLUDE_MALLOC_H */
   687 
   688 struct mallinfo
   689 {
   690     MALLINFO_FIELD_TYPE arena;  /* non-mmapped space allocated from system */
   691     MALLINFO_FIELD_TYPE ordblks;        /* number of free chunks */
   692     MALLINFO_FIELD_TYPE smblks; /* always 0 */
   693     MALLINFO_FIELD_TYPE hblks;  /* always 0 */
   694     MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
   695     MALLINFO_FIELD_TYPE usmblks;        /* maximum total allocated space */
   696     MALLINFO_FIELD_TYPE fsmblks;        /* always 0 */
   697     MALLINFO_FIELD_TYPE uordblks;       /* total allocated space */
   698     MALLINFO_FIELD_TYPE fordblks;       /* total free space */
   699     MALLINFO_FIELD_TYPE keepcost;       /* releasable (via malloc_trim) space */
   700 };
   701 
   702 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
   703 #endif /* NO_MALLINFO */
   704 
   705 #ifdef __cplusplus
   706 extern "C"
   707 {
   708 #endif                          /* __cplusplus */
   709 
   710 #if !ONLY_MSPACES
   711 
   712 /* ------------------- Declarations of public routines ------------------- */
   713 
   714 #ifndef USE_DL_PREFIX
   715 #define dlcalloc               calloc
   716 #define dlfree                 free
   717 #define dlmalloc               malloc
   718 #define dlmemalign             memalign
   719 #define dlrealloc              realloc
   720 #define dlvalloc               valloc
   721 #define dlpvalloc              pvalloc
   722 #define dlmallinfo             mallinfo
   723 #define dlmallopt              mallopt
   724 #define dlmalloc_trim          malloc_trim
   725 #define dlmalloc_stats         malloc_stats
   726 #define dlmalloc_usable_size   malloc_usable_size
   727 #define dlmalloc_footprint     malloc_footprint
   728 #define dlmalloc_max_footprint malloc_max_footprint
   729 #define dlindependent_calloc   independent_calloc
   730 #define dlindependent_comalloc independent_comalloc
   731 #endif                          /* USE_DL_PREFIX */
   732 
   733 
   734 /*
   735   malloc(size_t n)
   736   Returns a pointer to a newly allocated chunk of at least n bytes, or
   737   null if no space is available, in which case errno is set to ENOMEM
   738   on ANSI C systems.
   739 
   740   If n is zero, malloc returns a minimum-sized chunk. (The minimum
   741   size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
   742   systems.)  Note that size_t is an unsigned type, so calls with
   743   arguments that would be negative if signed are interpreted as
   744   requests for huge amounts of space, which will often fail. The
   745   maximum supported value of n differs across systems, but is in all
   746   cases less than the maximum representable value of a size_t.
   747 */
   748     void *dlmalloc(size_t);
   749 
   750 /*
   751   free(void* p)
   752   Releases the chunk of memory pointed to by p, that had been previously
   753   allocated using malloc or a related routine such as realloc.
   754   It has no effect if p is null. If p was not malloced or already
   755   freed, free(p) will by default cause the current program to abort.
   756 */
   757     void dlfree(void *);
   758 
   759 /*
   760   calloc(size_t n_elements, size_t element_size);
   761   Returns a pointer to n_elements * element_size bytes, with all locations
   762   set to zero.
   763 */
   764     void *dlcalloc(size_t, size_t);
   765 
   766 /*
   767   realloc(void* p, size_t n)
   768   Returns a pointer to a chunk of size n that contains the same data
   769   as does chunk p up to the minimum of (n, p's size) bytes, or null
   770   if no space is available.
   771 
   772   The returned pointer may or may not be the same as p. The algorithm
   773   prefers extending p in most cases when possible, otherwise it
   774   employs the equivalent of a malloc-copy-free sequence.
   775 
   776   If p is null, realloc is equivalent to malloc.
   777 
   778   If space is not available, realloc returns null, errno is set (if on
   779   ANSI) and p is NOT freed.
   780 
    781   If n is for fewer bytes than already held by p, the newly unused
   782   space is lopped off and freed if possible.  realloc with a size
   783   argument of zero (re)allocates a minimum-sized chunk.
   784 
   785   The old unix realloc convention of allowing the last-free'd chunk
   786   to be used as an argument to realloc is not supported.
   787 */
   788 
   789     void *dlrealloc(void *, size_t);
   790 
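        /*
          For example (an illustrative sketch; handle_out_of_memory is a
          hypothetical error handler): because a failed realloc returns null
          and leaves p intact, assign through a temporary so the original
          block is not leaked:

            void* tmp = realloc(p, newsize);
            if (tmp != 0)
              p = tmp;
            else
              handle_out_of_memory();  // hypothetical; p is still valid here
        */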
   791 /*
   792   memalign(size_t alignment, size_t n);
   793   Returns a pointer to a newly allocated chunk of n bytes, aligned
   794   in accord with the alignment argument.
   795 
   796   The alignment argument should be a power of two. If the argument is
   797   not a power of two, the nearest greater power is used.
   798   8-byte alignment is guaranteed by normal malloc calls, so don't
   799   bother calling memalign with an argument of 8 or less.
   800 
   801   Overreliance on memalign is a sure way to fragment space.
   802 */
   803     void *dlmemalign(size_t, size_t);
   804 
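        /*
          For example (an illustrative sketch), a 64-byte-aligned buffer:

            void* p = memalign(64, 1000);  // alignment must be a power of two
        */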
   805 /*
   806   valloc(size_t n);
   807   Equivalent to memalign(pagesize, n), where pagesize is the page
   808   size of the system. If the pagesize is unknown, 4096 is used.
   809 */
   810     void *dlvalloc(size_t);
   811 
   812 /*
   813   mallopt(int parameter_number, int parameter_value)
    814   Sets tunable parameters. The format is to provide a
   815   (parameter-number, parameter-value) pair.  mallopt then sets the
   816   corresponding parameter to the argument value if it can (i.e., so
   817   long as the value is meaningful), and returns 1 if successful else
   818   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
    819   normally defined in malloc.h.  None of these are used in this malloc,
   820   so setting them has no effect. But this malloc also supports other
   821   options in mallopt. See below for details.  Briefly, supported
   822   parameters are as follows (listed defaults are for "typical"
   823   configurations).
   824 
   825   Symbol            param #  default    allowed param values
   826   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
   827   M_GRANULARITY        -2     page size   any power of 2 >= page size
   828   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
   829 */
   830     int dlmallopt(int, int);
   831 
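        /*
          For example (an illustrative sketch), the parameters above can be
          tuned at run time; each call returns 1 on success, 0 otherwise:

            mallopt(M_TRIM_THRESHOLD, 1024*1024);  // keep at most 1MB of slack
            mallopt(M_MMAP_THRESHOLD, 512*1024);   // mmap requests >= 512K
            mallopt(M_GRANULARITY,    64*1024);    // 64K system allocation unit
        */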
   832 /*
   833   malloc_footprint();
   834   Returns the number of bytes obtained from the system.  The total
   835   number of bytes allocated by malloc, realloc etc., is less than this
   836   value. Unlike mallinfo, this function returns only a precomputed
   837   result, so can be called frequently to monitor memory consumption.
   838   Even if locks are otherwise defined, this function does not use them,
   839   so results might not be up to date.
   840 */
   841     size_t dlmalloc_footprint(void);
   842 
   843 /*
   844   malloc_max_footprint();
   845   Returns the maximum number of bytes obtained from the system. This
   846   value will be greater than current footprint if deallocated space
   847   has been reclaimed by the system. The peak number of bytes allocated
   848   by malloc, realloc etc., is less than this value. Unlike mallinfo,
   849   this function returns only a precomputed result, so can be called
   850   frequently to monitor memory consumption.  Even if locks are
   851   otherwise defined, this function does not use them, so results might
   852   not be up to date.
   853 */
   854     size_t dlmalloc_max_footprint(void);
   855 
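        /*
          For example (an illustrative sketch; SOME_LIMIT and trim_caches are
          hypothetical application names), the footprint routines are cheap
          enough to poll for monitoring:

            size_t now  = malloc_footprint();
            size_t peak = malloc_max_footprint();
            if (now > SOME_LIMIT)   // hypothetical soft limit
              trim_caches();        // hypothetical application hook
        */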
   856 #if !NO_MALLINFO
   857 /*
   858   mallinfo()
   859   Returns (by copy) a struct containing various summary statistics:
   860 
   861   arena:     current total non-mmapped bytes allocated from system
   862   ordblks:   the number of free chunks
   863   smblks:    always zero.
   864   hblks:     current number of mmapped regions
   865   hblkhd:    total bytes held in mmapped regions
   866   usmblks:   the maximum total allocated space. This will be greater
   867                 than current total if trimming has occurred.
   868   fsmblks:   always zero
   869   uordblks:  current total allocated space (normal or mmapped)
   870   fordblks:  total free space
   871   keepcost:  the maximum number of bytes that could ideally be released
   872                back to system via malloc_trim. ("ideally" means that
   873                it ignores page restrictions etc.)
   874 
   875   Because these fields are ints, but internal bookkeeping may
   876   be kept as longs, the reported values may wrap around zero and
   877   thus be inaccurate.
   878 */
   879     struct mallinfo dlmallinfo(void);
   880 #endif                          /* NO_MALLINFO */
   881 
   882 /*
   883   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
   884 
   885   independent_calloc is similar to calloc, but instead of returning a
   886   single cleared space, it returns an array of pointers to n_elements
   887   independent elements that can hold contents of size elem_size, each
   888   of which starts out cleared, and can be independently freed,
   889   realloc'ed etc. The elements are guaranteed to be adjacently
   890   allocated (this is not guaranteed to occur with multiple callocs or
   891   mallocs), which may also improve cache locality in some
   892   applications.
   893 
   894   The "chunks" argument is optional (i.e., may be null, which is
   895   probably the most typical usage). If it is null, the returned array
   896   is itself dynamically allocated and should also be freed when it is
   897   no longer needed. Otherwise, the chunks array must be of at least
   898   n_elements in length. It is filled in with the pointers to the
   899   chunks.
   900 
   901   In either case, independent_calloc returns this pointer array, or
   902   null if the allocation failed.  If n_elements is zero and "chunks"
   903   is null, it returns a chunk representing an array with zero elements
   904   (which should be freed if not wanted).
   905 
   906   Each element must be individually freed when it is no longer
   907   needed. If you'd like to instead be able to free all at once, you
   908   should instead use regular calloc and assign pointers into this
   909   space to represent elements.  (In this case though, you cannot
   910   independently free elements.)
   911 
   912   independent_calloc simplifies and speeds up implementations of many
   913   kinds of pools.  It may also be useful when constructing large data
   914   structures that initially have a fixed number of fixed-sized nodes,
   915   but the number is not known at compile time, and some of the nodes
   916   may later need to be freed. For example:
   917 
   918   struct Node { int item; struct Node* next; };
   919 
   920   struct Node* build_list() {
    921     struct Node** pool; int i;
   922     int n = read_number_of_nodes_needed();
   923     if (n <= 0) return 0;
    924     pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
   925     if (pool == 0) die();
   926     // organize into a linked list...
   927     struct Node* first = pool[0];
   928     for (i = 0; i < n-1; ++i)
   929       pool[i]->next = pool[i+1];
   930     free(pool);     // Can now free the array (or not, if it is needed later)
   931     return first;
   932   }
   933 */
   934     void **dlindependent_calloc(size_t, size_t, void **);
   935 
   936 /*
   937   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   938 
   939   independent_comalloc allocates, all at once, a set of n_elements
   940   chunks with sizes indicated in the "sizes" array.    It returns
   941   an array of pointers to these elements, each of which can be
   942   independently freed, realloc'ed etc. The elements are guaranteed to
   943   be adjacently allocated (this is not guaranteed to occur with
   944   multiple callocs or mallocs), which may also improve cache locality
   945   in some applications.
   946 
   947   The "chunks" argument is optional (i.e., may be null). If it is null
   948   the returned array is itself dynamically allocated and should also
   949   be freed when it is no longer needed. Otherwise, the chunks array
   950   must be of at least n_elements in length. It is filled in with the
   951   pointers to the chunks.
   952 
   953   In either case, independent_comalloc returns this pointer array, or
   954   null if the allocation failed.  If n_elements is zero and chunks is
   955   null, it returns a chunk representing an array with zero elements
   956   (which should be freed if not wanted).
   957 
   958   Each element must be individually freed when it is no longer
   959   needed. If you'd like to instead be able to free all at once, you
   960   should instead use a single regular malloc, and assign pointers at
   961   particular offsets in the aggregate space. (In this case though, you
   962   cannot independently free elements.)
   963 
    964   independent_comalloc differs from independent_calloc in that each
   965   element may have a different size, and also that it does not
   966   automatically clear elements.
   967 
   968   independent_comalloc can be used to speed up allocation in cases
   969   where several structs or objects must always be allocated at the
   970   same time.  For example:
   971 
   972   struct Head { ... }
   973   struct Foot { ... }
   974 
   975   void send_message(char* msg) {
   976     int msglen = strlen(msg);
   977     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
   978     void* chunks[3];
   979     if (independent_comalloc(3, sizes, chunks) == 0)
   980       die();
   981     struct Head* head = (struct Head*)(chunks[0]);
   982     char*        body = (char*)(chunks[1]);
   983     struct Foot* foot = (struct Foot*)(chunks[2]);
   984     // ...
   985   }
   986 
   987   In general though, independent_comalloc is worth using only for
   988   larger values of n_elements. For small values, you probably won't
   989   detect enough difference from series of malloc calls to bother.
   990 
   991   Overuse of independent_comalloc can increase overall memory usage,
   992   since it cannot reuse existing noncontiguous small chunks that
   993   might be available for some of the elements.
   994 */
   995     void **dlindependent_comalloc(size_t, size_t *, void **);
   996 
   997 
   998 /*
   999   pvalloc(size_t n);
  1000   Equivalent to valloc(minimum-page-that-holds(n)), that is,
  1001   round up n to nearest pagesize.
  1002  */
  1003     void *dlpvalloc(size_t);
  1004 
  1005 /*
  1006   malloc_trim(size_t pad);
  1007 
  1008   If possible, gives memory back to the system (via negative arguments
  1009   to sbrk) if there is unused memory at the `high' end of the malloc
  1010   pool or in unused MMAP segments. You can call this after freeing
  1011   large blocks of memory to potentially reduce the system-level memory
  1012   requirements of a program. However, it cannot guarantee to reduce
  1013   memory. Under some allocation patterns, some large free blocks of
  1014   memory will be locked between two used chunks, so they cannot be
  1015   given back to the system.
  1016 
  1017   The `pad' argument to malloc_trim represents the amount of free
  1018   trailing space to leave untrimmed. If this argument is zero, only
  1019   the minimum amount of memory to maintain internal data structures
  1020   will be left. Non-zero arguments can be supplied to maintain enough
  1021   trailing space to service future expected allocations without having
  1022   to re-obtain memory from the system.
  1023 
  1024   Malloc_trim returns 1 if it actually released any memory, else 0.
  1025 */
  1026     int dlmalloc_trim(size_t);
  1027 
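        /*
          For example (an illustrative sketch; big_buffer1/2 are hypothetical
          allocations), a program can ask for unused memory back after freeing
          large blocks:

            free(big_buffer1);
            free(big_buffer2);
            (void) malloc_trim(0);  // keep no extra trailing space
        */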
  1028 /*
  1029   malloc_usable_size(void* p);
  1030 
  1031   Returns the number of bytes you can actually use in
  1032   an allocated chunk, which may be more than you requested (although
  1033   often not) due to alignment and minimum size constraints.
  1034   You can use this many bytes without worrying about
  1035   overwriting other allocated objects. This is not a particularly great
  1036   programming practice. malloc_usable_size can be more useful in
  1037   debugging and assertions, for example:
  1038 
  1039   p = malloc(n);
  1040   assert(malloc_usable_size(p) >= 256);
  1041 */
  1042     size_t dlmalloc_usable_size(void *);
  1043 
  1044 /*
  1045   malloc_stats();
  1046   Prints on stderr the amount of space obtained from the system (both
  1047   via sbrk and mmap), the maximum amount (which may be more than
  1048   current if malloc_trim and/or munmap got called), and the current
  1049   number of bytes allocated via malloc (or realloc, etc) but not yet
  1050   freed. Note that this is the number of bytes allocated, not the
  1051   number requested. It will be larger than the number requested
  1052   because of alignment and bookkeeping overhead. Because it includes
  1053   alignment wastage as being in use, this figure may be greater than
  1054   zero even when no user-level chunks are allocated.
  1055 
  1056   The reported current and maximum system memory can be inaccurate if
  1057   a program makes other calls to system memory allocation functions
  1058   (normally sbrk) outside of malloc.
  1059 
  1060   malloc_stats prints only the most commonly interesting statistics.
  1061   More information can be obtained by calling mallinfo.
  1062 */
  1063     void dlmalloc_stats(void);
  1064 
  1065 #endif                          /* ONLY_MSPACES */
  1066 
  1067 #if MSPACES
  1068 
  1069 /*
  1070   mspace is an opaque type representing an independent
  1071   region of space that supports mspace_malloc, etc.
  1072 */
  1073     typedef void *mspace;
  1074 
  1075 /*
  1076   create_mspace creates and returns a new independent space with the
  1077   given initial capacity, or, if 0, the default granularity size.  It
  1078   returns null if there is no system memory available to create the
  1079   space.  If argument locked is non-zero, the space uses a separate
  1080   lock to control access. The capacity of the space will grow
  1081   dynamically as needed to service mspace_malloc requests.  You can
  1082   control the sizes of incremental increases of this space by
  1083   compiling with a different DEFAULT_GRANULARITY or dynamically
  1084   setting with mallopt(M_GRANULARITY, value).
  1085 */
  1086     mspace create_mspace(size_t capacity, int locked);
  1087 
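        /*
          For example (an illustrative sketch, using mspace_malloc, mspace_free
          and destroy_mspace declared below), a private arena can be created,
          used, and torn down independently of the global malloc:

            mspace arena = create_mspace(0, 0);  // default capacity, no lock
            if (arena != 0) {
              void* p = mspace_malloc(arena, 1024);
              mspace_free(arena, p);
              destroy_mspace(arena);             // releases everything at once
            }
        */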
  1088 /*
  1089   destroy_mspace destroys the given space, and attempts to return all
  1090   of its memory back to the system, returning the total number of
  1091   bytes freed. After destruction, the results of access to all memory
  1092   used by the space become undefined.
  1093 */
  1094     size_t destroy_mspace(mspace msp);
  1095 
  1096 /*
  1097   create_mspace_with_base uses the memory supplied as the initial base
  1098   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  1099   space is used for bookkeeping, so the capacity must be at least this
  1100   large. (Otherwise 0 is returned.) When this initial space is
  1101   exhausted, additional memory will be obtained from the system.
  1102   Destroying this space will deallocate all additionally allocated
  1103   space (if possible) but not the initial base.
  1104 */
  1105     mspace create_mspace_with_base(void *base, size_t capacity, int locked);
  1106 
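        /*
          For example (an illustrative sketch), an mspace can be layered over a
          caller-supplied buffer such as a static pool:

            static char pool[1 << 20];           // 1MB backing store
            mspace ms = create_mspace_with_base(pool, sizeof (pool), 0);
            void* p = (ms != 0) ? mspace_malloc(ms, 256) : 0;
        */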
  1107 /*
  1108   mspace_malloc behaves as malloc, but operates within
  1109   the given space.
  1110 */
  1111     void *mspace_malloc(mspace msp, size_t bytes);
  1112 
  1113 /*
  1114   mspace_free behaves as free, but operates within
  1115   the given space.
  1116 
  1117   If compiled with FOOTERS==1, mspace_free is not actually needed.
  1118   free may be called instead of mspace_free because freed chunks from
  1119   any space are handled by their originating spaces.
  1120 */
  1121     void mspace_free(mspace msp, void *mem);
  1122 
  1123 /*
  1124   mspace_realloc behaves as realloc, but operates within
  1125   the given space.
  1126 
  1127   If compiled with FOOTERS==1, mspace_realloc is not actually
  1128   needed.  realloc may be called instead of mspace_realloc because
  1129   realloced chunks from any space are handled by their originating
  1130   spaces.
  1131 */
  1132     void *mspace_realloc(mspace msp, void *mem, size_t newsize);
  1133 
  1134 /*
  1135   mspace_calloc behaves as calloc, but operates within
  1136   the given space.
  1137 */
  1138     void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  1139 
  1140 /*
  1141   mspace_memalign behaves as memalign, but operates within
  1142   the given space.
  1143 */
  1144     void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  1145 
  1146 /*
  1147   mspace_independent_calloc behaves as independent_calloc, but
  1148   operates within the given space.
  1149 */
  1150     void **mspace_independent_calloc(mspace msp, size_t n_elements,
  1151                                      size_t elem_size, void *chunks[]);
  1152 
  1153 /*
  1154   mspace_independent_comalloc behaves as independent_comalloc, but
  1155   operates within the given space.
  1156 */
  1157     void **mspace_independent_comalloc(mspace msp, size_t n_elements,
  1158                                        size_t sizes[], void *chunks[]);
  1159 
  1160 /*
  1161   mspace_footprint() returns the number of bytes obtained from the
  1162   system for this space.
  1163 */
  1164     size_t mspace_footprint(mspace msp);
  1165 
  1166 /*
  1167   mspace_max_footprint() returns the peak number of bytes obtained from the
  1168   system for this space.
  1169 */
  1170     size_t mspace_max_footprint(mspace msp);
  1171 
  1172 
  1173 #if !NO_MALLINFO
  1174 /*
  1175   mspace_mallinfo behaves as mallinfo, but reports properties of
  1176   the given space.
  1177 */
  1178     struct mallinfo mspace_mallinfo(mspace msp);
  1179 #endif                          /* NO_MALLINFO */
  1180 
  1181 /*
  1182   mspace_malloc_stats behaves as malloc_stats, but reports
  1183   properties of the given space.
  1184 */
  1185     void mspace_malloc_stats(mspace msp);
  1186 
  1187 /*
  1188   mspace_trim behaves as malloc_trim, but
  1189   operates within the given space.
  1190 */
  1191     int mspace_trim(mspace msp, size_t pad);
  1192 
  1193 /*
  1194   An alias for mallopt.
  1195 */
  1196     int mspace_mallopt(int, int);
  1197 
  1198 #endif                          /* MSPACES */
  1199 
  1200 #ifdef __cplusplus
  1201 };                              /* end of extern "C" */
  1202 #endif /* __cplusplus */
  1203 
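/*
  Illustrative sketch (not compiled): one way the mspace API declared
  above might be used, assuming this file is built with MSPACES enabled.
  The function name, buffer size and request sizes are arbitrary example
  values, not part of this allocator.
*/
#if 0
static void example_mspace_usage(void)
{
    /* A private space with the default initial capacity and its own lock. */
    mspace msp = create_mspace(0, 1);
    if (msp != 0) {
        void *p = mspace_malloc(msp, 128);
        if (p != 0)
            mspace_free(msp, p);
        /* Returns all memory this space obtained from the system. */
        destroy_mspace(msp);
    }

    /* A space carved out of caller-supplied storage; destroy_mspace
       releases any extra system memory but never the initial base. */
    {
        static char arena[64 * 1024];
        mspace msp2 = create_mspace_with_base(arena, sizeof(arena), 0);
        if (msp2 != 0) {
            void *q = mspace_calloc(msp2, 10, sizeof(int));
            (void) q;
            destroy_mspace(msp2);
        }
    }
}
#endif /* 0 (illustrative only) */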
  1204 /*
  1205   ========================================================================
  1206   To make a fully customizable malloc.h header file, cut everything
  1207   above this line, put into file malloc.h, edit to suit, and #include it
  1208   on the next line, as well as in programs that use this malloc.
  1209   ========================================================================
  1210 */
  1211 
  1212 /* #include "malloc.h" */
  1213 
  1214 /*------------------------------ internal #includes ---------------------- */
  1215 
  1216 #ifdef _MSC_VER
  1217 #pragma warning( disable : 4146 )       /* no "unsigned" warnings */
  1218 #endif /* _MSC_VER */
  1219 
  1220 #ifndef LACKS_STDIO_H
  1221 #include <stdio.h>              /* for printing in malloc_stats */
  1222 #endif
  1223 
  1224 #ifndef LACKS_ERRNO_H
  1225 #include <errno.h>              /* for MALLOC_FAILURE_ACTION */
  1226 #endif /* LACKS_ERRNO_H */
  1227 #if FOOTERS
  1228 #include <time.h>               /* for magic initialization */
  1229 #endif /* FOOTERS */
  1230 #ifndef LACKS_STDLIB_H
  1231 #include <stdlib.h>             /* for abort() */
  1232 #endif /* LACKS_STDLIB_H */
  1233 #ifdef DEBUG
  1234 #if ABORT_ON_ASSERT_FAILURE
  1235 #define assert(x) if(!(x)) ABORT
  1236 #else /* ABORT_ON_ASSERT_FAILURE */
  1237 #include <assert.h>
  1238 #endif /* ABORT_ON_ASSERT_FAILURE */
  1239 #else /* DEBUG */
  1240 #define assert(x)
  1241 #endif /* DEBUG */
  1242 #ifndef LACKS_STRING_H
  1243 #include <string.h>             /* for memset etc */
  1244 #endif /* LACKS_STRING_H */
  1245 #if USE_BUILTIN_FFS
  1246 #ifndef LACKS_STRINGS_H
  1247 #include <strings.h>            /* for ffs */
  1248 #endif /* LACKS_STRINGS_H */
  1249 #endif /* USE_BUILTIN_FFS */
  1250 #if HAVE_MMAP
  1251 #ifndef LACKS_SYS_MMAN_H
  1252 #include <sys/mman.h>           /* for mmap */
  1253 #endif /* LACKS_SYS_MMAN_H */
  1254 #ifndef LACKS_FCNTL_H
  1255 #include <fcntl.h>
  1256 #endif /* LACKS_FCNTL_H */
  1257 #endif /* HAVE_MMAP */
  1258 #if HAVE_MORECORE
  1259 #ifndef LACKS_UNISTD_H
  1260 #include <unistd.h>             /* for sbrk */
  1261 #else /* LACKS_UNISTD_H */
  1262 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  1263 extern void *sbrk(ptrdiff_t);
  1264 #endif /* FreeBSD etc */
  1265 #endif /* LACKS_UNISTD_H */
  1266 #endif /* HAVE_MORECORE */
  1267 
  1268 #ifndef WIN32
  1269 #ifndef malloc_getpagesize
  1270 #  ifdef _SC_PAGESIZE           /* some SVR4 systems omit an underscore */
  1271 #    ifndef _SC_PAGE_SIZE
  1272 #      define _SC_PAGE_SIZE _SC_PAGESIZE
  1273 #    endif
  1274 #  endif
  1275 #  ifdef _SC_PAGE_SIZE
  1276 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  1277 #  else
  1278 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  1279 extern size_t getpagesize();
  1280 #      define malloc_getpagesize getpagesize()
  1281 #    else
  1282 #      ifdef WIN32              /* use supplied emulation of getpagesize */
  1283 #        define malloc_getpagesize getpagesize()
  1284 #      else
  1285 #        ifndef LACKS_SYS_PARAM_H
  1286 #          include <sys/param.h>
  1287 #        endif
  1288 #        ifdef EXEC_PAGESIZE
  1289 #          define malloc_getpagesize EXEC_PAGESIZE
  1290 #        else
  1291 #          ifdef NBPG
  1292 #            ifndef CLSIZE
  1293 #              define malloc_getpagesize NBPG
  1294 #            else
  1295 #              define malloc_getpagesize (NBPG * CLSIZE)
  1296 #            endif
  1297 #          else
  1298 #            ifdef NBPC
  1299 #              define malloc_getpagesize NBPC
  1300 #            else
  1301 #              ifdef PAGESIZE
  1302 #                define malloc_getpagesize PAGESIZE
  1303 #              else /* just guess */
  1304 #                define malloc_getpagesize ((size_t)4096U)
  1305 #              endif
  1306 #            endif
  1307 #          endif
  1308 #        endif
  1309 #      endif
  1310 #    endif
  1311 #  endif
  1312 #endif
  1313 #endif
  1314 
  1315 /* ------------------- size_t and alignment properties -------------------- */
  1316 
  1317 /* The byte and bit size of a size_t */
  1318 #define SIZE_T_SIZE         (sizeof(size_t))
  1319 #define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
  1320 
  1321 /* Some constants coerced to size_t */
  1322 /* Annoying but necessary to avoid errors on some platforms */
  1323 #define SIZE_T_ZERO         ((size_t)0)
  1324 #define SIZE_T_ONE          ((size_t)1)
  1325 #define SIZE_T_TWO          ((size_t)2)
  1326 #define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
  1327 #define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
  1328 #define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
  1329 #define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
  1330 
  1331 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
  1332 #define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
  1333 
  1334 /* True if address a has acceptable alignment */
  1335 #define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
  1336 
  1337 /* the number of bytes to offset an address to align it */
  1338 #define align_offset(A)\
  1339  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  1340   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
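/*
  Worked example (assuming MALLOC_ALIGNMENT is 8, a common default, so
  CHUNK_ALIGN_MASK is 7): for an address whose low three bits are 100b,
  is_aligned() is false and align_offset() is (8 - 4) & 7 == 4, the
  number of bytes to add to reach the next 8-byte boundary; for an
  already-aligned address the offset is 0.
*/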
  1341 
  1342 /* -------------------------- MMAP preliminaries ------------------------- */
  1343 
  1344 /*
  1345    If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
  1346    checks to fail so the compiler optimizer can delete code rather than
  1347    using so many "#if"s.
  1348 */
  1349 
  1350 
  1351 /* MORECORE and MMAP must return MFAIL on failure */
  1352 #define MFAIL                ((void*)(MAX_SIZE_T))
  1353 #define CMFAIL               ((char*)(MFAIL))   /* defined for convenience */
  1354 
  1355 #if !HAVE_MMAP
  1356 #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
  1357 #define USE_MMAP_BIT         (SIZE_T_ZERO)
  1358 #define CALL_MMAP(s)         MFAIL
  1359 #define CALL_MUNMAP(a, s)    (-1)
  1360 #define DIRECT_MMAP(s)       MFAIL
  1361 
  1362 #else /* HAVE_MMAP */
  1363 #define IS_MMAPPED_BIT       (SIZE_T_ONE)
  1364 #define USE_MMAP_BIT         (SIZE_T_ONE)
  1365 
  1366 #ifndef WIN32
  1367 #define CALL_MUNMAP(a, s)    munmap((a), (s))
  1368 #define MMAP_PROT            (PROT_READ|PROT_WRITE)
  1369 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  1370 #define MAP_ANONYMOUS        MAP_ANON
  1371 #endif /* MAP_ANON */
  1372 #ifdef MAP_ANONYMOUS
  1373 #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
  1374 #define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
  1375 #else /* MAP_ANONYMOUS */
  1376 /*
  1377    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  1378    is unlikely to be needed, but is supplied just in case.
  1379 */
  1380 #define MMAP_FLAGS           (MAP_PRIVATE)
  1381 static int dev_zero_fd = -1;    /* Cached file descriptor for /dev/zero. */
  1382 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  1383            (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1384             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  1385             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
  1386 #endif /* MAP_ANONYMOUS */
  1387 
  1388 #define DIRECT_MMAP(s)       CALL_MMAP(s)
  1389 #else /* WIN32 */
  1390 
  1391 /* Win32 MMAP via VirtualAlloc */
  1392 static void *
  1393 win32mmap(size_t size)
  1394 {
  1395     void *ptr =
  1396         VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  1397     return (ptr != 0) ? ptr : MFAIL;
  1398 }
  1399 
  1400 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
  1401 static void *
  1402 win32direct_mmap(size_t size)
  1403 {
  1404     void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
  1405                              PAGE_READWRITE);
  1406     return (ptr != 0) ? ptr : MFAIL;
  1407 }
  1408 
  1409 /* This function supports releasing coalesced segments */
  1410 static int
  1411 win32munmap(void *ptr, size_t size)
  1412 {
  1413     MEMORY_BASIC_INFORMATION minfo;
  1414     char *cptr = ptr;
  1415     while (size) {
  1416         if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
  1417             return -1;
  1418         if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
  1419             minfo.State != MEM_COMMIT || minfo.RegionSize > size)
  1420             return -1;
  1421         if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
  1422             return -1;
  1423         cptr += minfo.RegionSize;
  1424         size -= minfo.RegionSize;
  1425     }
  1426     return 0;
  1427 }
  1428 
  1429 #define CALL_MMAP(s)         win32mmap(s)
  1430 #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
  1431 #define DIRECT_MMAP(s)       win32direct_mmap(s)
  1432 #endif /* WIN32 */
  1433 #endif /* HAVE_MMAP */
  1434 
  1435 #if HAVE_MMAP && HAVE_MREMAP
  1436 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
  1437 #else /* HAVE_MMAP && HAVE_MREMAP */
  1438 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
  1439 #endif /* HAVE_MMAP && HAVE_MREMAP */
  1440 
  1441 #if HAVE_MORECORE
  1442 #define CALL_MORECORE(S)     MORECORE(S)
  1443 #else /* HAVE_MORECORE */
  1444 #define CALL_MORECORE(S)     MFAIL
  1445 #endif /* HAVE_MORECORE */
  1446 
  1447 /* mstate bit set if contiguous morecore disabled or failed */
  1448 #define USE_NONCONTIGUOUS_BIT (4U)
  1449 
  1450 /* segment bit set in create_mspace_with_base */
  1451 #define EXTERN_BIT            (8U)
  1452 
  1453 
  1454 /* --------------------------- Lock preliminaries ------------------------ */
  1455 
  1456 #if USE_LOCKS
  1457 
  1458 /*
  1459   When locks are defined, there are up to two global locks:
  1460 
  1461   * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
  1462     MORECORE.  In many cases sys_alloc requires two calls that should
  1463     not be interleaved with calls by other threads.  This does not
  1464     protect against direct calls to MORECORE by other threads not
  1465     using this lock, so there is still code to cope as best we can
  1466     with any interference.
  1467 
  1468   * magic_init_mutex ensures that mparams.magic and other
  1469     unique mparams values are initialized only once.
  1470 */
  1471 
  1472 #ifndef WIN32
  1473 /* By default use posix locks */
  1474 #include <pthread.h>
  1475 #define MLOCK_T pthread_mutex_t
  1476 #define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
  1477 #define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
  1478 #define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
  1479 
  1480 #if HAVE_MORECORE
  1481 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
  1482 #endif /* HAVE_MORECORE */
  1483 
  1484 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
  1485 
  1486 #else /* WIN32 */
  1487 /*
  1488    Because lock-protected regions have bounded times, and there
  1489    are no recursive lock calls, we can use simple spinlocks.
  1490 */
  1491 
  1492 #define MLOCK_T long
  1493 static int
  1494 win32_acquire_lock(MLOCK_T * sl)
  1495 {
  1496     for (;;) {
  1497 #ifdef InterlockedCompareExchangePointer
  1498         if (!InterlockedCompareExchange(sl, 1, 0))
  1499             return 0;
  1500 #else /* Use older void* version */
  1501         if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
  1502             return 0;
  1503 #endif /* InterlockedCompareExchangePointer */
  1504         Sleep(0);
  1505     }
  1506 }
  1507 
  1508 static void
  1509 win32_release_lock(MLOCK_T * sl)
  1510 {
  1511     InterlockedExchange(sl, 0);
  1512 }
  1513 
  1514 #define INITIAL_LOCK(l)      *(l)=0
  1515 #define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
  1516 #define RELEASE_LOCK(l)      win32_release_lock(l)
  1517 #if HAVE_MORECORE
  1518 static MLOCK_T morecore_mutex;
  1519 #endif /* HAVE_MORECORE */
  1520 static MLOCK_T magic_init_mutex;
  1521 #endif /* WIN32 */
  1522 
  1523 #define USE_LOCK_BIT               (2U)
  1524 #else /* USE_LOCKS */
  1525 #define USE_LOCK_BIT               (0U)
  1526 #define INITIAL_LOCK(l)
  1527 #endif /* USE_LOCKS */
  1528 
  1529 #if USE_LOCKS && HAVE_MORECORE
  1530 #define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
  1531 #define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
  1532 #else /* USE_LOCKS && HAVE_MORECORE */
  1533 #define ACQUIRE_MORECORE_LOCK()
  1534 #define RELEASE_MORECORE_LOCK()
  1535 #endif /* USE_LOCKS && HAVE_MORECORE */
  1536 
  1537 #if USE_LOCKS
  1538 #define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
  1539 #define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
  1540 #else /* USE_LOCKS */
  1541 #define ACQUIRE_MAGIC_INIT_LOCK()
  1542 #define RELEASE_MAGIC_INIT_LOCK()
  1543 #endif /* USE_LOCKS */
  1544 
  1545 
  1546 /* -----------------------  Chunk representations ------------------------ */
  1547 
  1548 /*
  1549   (The following includes lightly edited explanations by Colin Plumb.)
  1550 
  1551   The malloc_chunk declaration below is misleading (but accurate and
  1552   necessary).  It declares a "view" into memory allowing access to
  1553   necessary fields at known offsets from a given base.
  1554 
  1555   Chunks of memory are maintained using a `boundary tag' method as
  1556   originally described by Knuth.  (See the paper by Paul Wilson
  1557   ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
  1558   techniques.)  Sizes of free chunks are stored both in the front of
  1559   each chunk and at the end.  This makes consolidating fragmented
  1560   chunks into bigger chunks fast.  The head fields also hold bits
  1561   representing whether chunks are free or in use.
  1562 
  1563   Here are some pictures to make it clearer.  They are "exploded" to
  1564   show that the state of a chunk can be thought of as extending from
  1565   the high 31 bits of the head field of its header through the
  1566   prev_foot and PINUSE_BIT bit of the following chunk header.
  1567 
  1568   A chunk that's in use looks like:
  1569 
  1570    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1571            | Size of previous chunk (if P = 1)                             |
  1572            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1573          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1574          | Size of this chunk                                         1| +-+
  1575    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1576          |                                                               |
  1577          +-                                                             -+
  1578          |                                                               |
  1579          +-                                                             -+
  1580          |                                                               :
  1581          +-      size - sizeof(size_t) available payload bytes          -+
  1582          :                                                               |
  1583  chunk-> +-                                                             -+
  1584          |                                                               |
  1585          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1586        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
  1587        | Size of next chunk (may or may not be in use)               | +-+
  1588  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1589 
  1590     And if it's free, it looks like this:
  1591 
  1592    chunk-> +-                                                             -+
  1593            | User payload (must be in use, or we would have merged!)       |
  1594            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1595          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1596          | Size of this chunk                                         0| +-+
  1597    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1598          | Next pointer                                                  |
  1599          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1600          | Prev pointer                                                  |
  1601          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1602          |                                                               :
  1603          +-      size - sizeof(struct chunk) unused bytes               -+
  1604          :                                                               |
  1605  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1606          | Size of this chunk                                            |
  1607          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1608        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
  1609        | Size of next chunk (must be in use, or we would have merged)| +-+
  1610  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1611        |                                                               :
  1612        +- User payload                                                -+
  1613        :                                                               |
  1614        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1615                                                                      |0|
  1616                                                                      +-+
  1617   Note that since we always merge adjacent free chunks, the chunks
  1618   adjacent to a free chunk must be in use.
  1619 
  1620   Given a pointer to a chunk (which can be derived trivially from the
  1621   payload pointer) we can, in O(1) time, find out whether the adjacent
  1622   chunks are free, and if so, unlink them from the lists that they
  1623   are on and merge them with the current chunk.
  1624 
  1625   Chunks always begin on even word boundaries, so the mem portion
  1626   (which is returned to the user) is also on an even word boundary, and
  1627   thus at least double-word aligned.
  1628 
  1629   The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
  1630   chunk size (which is always a multiple of two words), is an in-use
  1631   bit for the *previous* chunk.  If that bit is *clear*, then the
  1632   word before the current chunk size contains the previous chunk
  1633   size, and can be used to find the front of the previous chunk.
  1634   The very first chunk allocated always has this bit set, preventing
  1635   access to non-existent (or non-owned) memory. If pinuse is set for
  1636   any given chunk, then you CANNOT determine the size of the
  1637   previous chunk, and might even get a memory addressing fault when
  1638   trying to do so.
  1639 
  1640   The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
  1641   the chunk size redundantly records whether the current chunk is
  1642   inuse. This redundancy enables usage checks within free and realloc,
  1643   and reduces indirection when freeing and consolidating chunks.
  1644 
  1645   Each freshly allocated chunk must have both cinuse and pinuse set.
  1646   That is, each allocated chunk borders either a previously allocated
  1647   and still in-use chunk, or the base of its memory arena. This is
  1648   ensured by making all allocations from the `lowest' part of any
  1649   found chunk.  Further, no free chunk physically borders another one,
  1650   so each free chunk is known to be preceded and followed by either
  1651   inuse chunks or the ends of memory.
  1652 
  1653   Note that the `foot' of the current chunk is actually represented
  1654   as the prev_foot of the NEXT chunk. This makes it easier to
  1655   deal with alignments etc but can be very confusing when trying
  1656   to extend or adapt this code.
  1657 
  1658   The exceptions to all this are
  1659 
  1660      1. The special chunk `top' is the top-most available chunk (i.e.,
  1661         the one bordering the end of available memory). It is treated
  1662         specially.  Top is never included in any bin, is used only if
  1663         no other chunk is available, and is released back to the
  1664         system if it is very large (see M_TRIM_THRESHOLD).  In effect,
  1665         the top chunk is treated as larger (and thus less well
  1666         fitting) than any other available chunk.  The top chunk
  1667         doesn't update its trailing size field since there is no next
  1668         contiguous chunk that would have to index off it. However,
  1669         space is still allocated for it (TOP_FOOT_SIZE) to enable
  1670         separation or merging when space is extended.
  1671 
  1672      2. Chunks allocated via mmap, which have the lowest-order bit
  1673         (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
  1674         PINUSE_BIT in their head fields.  Because they are allocated
  1675         one-by-one, each must carry its own prev_foot field, which is
  1676         also used to hold the offset this chunk has within its mmapped
  1677         region, which is needed to preserve alignment. Each mmapped
  1678         chunk is trailed by the first two fields of a fake next-chunk
  1679         for the sake of usage checks.
  1680 
  1681 */
  1682 
  1683 struct malloc_chunk
  1684 {
  1685     size_t prev_foot;           /* Size of previous chunk (if free).  */
  1686     size_t head;                /* Size and inuse bits. */
  1687     struct malloc_chunk *fd;    /* double links -- used only if free. */
  1688     struct malloc_chunk *bk;
  1689 };
  1690 
  1691 typedef struct malloc_chunk mchunk;
  1692 typedef struct malloc_chunk *mchunkptr;
  1693 typedef struct malloc_chunk *sbinptr;   /* The type of bins of chunks */
  1694 typedef size_t bindex_t;        /* Described below */
  1695 typedef unsigned int binmap_t;  /* Described below */
  1696 typedef unsigned int flag_t;    /* The type of various bit flag sets */
  1697 
  1698 /* -------------------- Chunk sizes and alignments ----------------------- */
  1699 
  1700 #define MCHUNK_SIZE         (sizeof(mchunk))
  1701 
  1702 #if FOOTERS
  1703 #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
  1704 #else /* FOOTERS */
  1705 #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
  1706 #endif /* FOOTERS */
  1707 
  1708 /* MMapped chunks need a second word of overhead ... */
  1709 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
  1710 /* ... and additional padding for fake next-chunk at foot */
  1711 #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
  1712 
  1713 /* The smallest size we can malloc is an aligned minimal chunk */
  1714 #define MIN_CHUNK_SIZE\
  1715   ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1716 
  1717 /* conversion from malloc headers to user pointers, and back */
  1718 #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
  1719 #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
  1720 /* chunk associated with aligned address A */
  1721 #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
  1722 
  1723 /* Bounds on request (not chunk) sizes. */
  1724 #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
  1725 #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
  1726 
  1727 /* pad request bytes into a usable size */
  1728 #define pad_request(req) \
  1729    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1730 
  1731 /* pad request, checking for minimum (but not maximum) */
  1732 #define request2size(req) \
  1733   (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
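/*
  Worked example (assuming a 32-bit build with 4-byte size_t, 8-byte
  MALLOC_ALIGNMENT and FOOTERS disabled, so CHUNK_OVERHEAD == 4 and
  MIN_CHUNK_SIZE == 16): pad_request(21) == (21 + 4 + 7) & ~7 == 32,
  while request2size(5) rounds the tiny request up to MIN_CHUNK_SIZE,
  i.e. 16.
*/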
  1734 
  1735 
  1736 /* ------------------ Operations on head and foot fields ----------------- */
  1737 
  1738 /*
  1739   The head field of a chunk is or'ed with PINUSE_BIT when the previous
  1740   adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
  1741   use. If the chunk was obtained with mmap, the prev_foot field has
  1742   IS_MMAPPED_BIT set, and its remaining bits hold the offset from the
  1743   base of the mmapped region to the base of the chunk.
  1744 */
  1745 
  1746 #define PINUSE_BIT          (SIZE_T_ONE)
  1747 #define CINUSE_BIT          (SIZE_T_TWO)
  1748 #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
  1749 
  1750 /* Head value for fenceposts */
  1751 #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
  1752 
  1753 /* extraction of fields from head words */
  1754 #define cinuse(p)           ((p)->head & CINUSE_BIT)
  1755 #define pinuse(p)           ((p)->head & PINUSE_BIT)
  1756 #define chunksize(p)        ((p)->head & ~(INUSE_BITS))
  1757 
  1758 #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
  1759 #define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
  1760 
  1761 /* Treat space at ptr +/- offset as a chunk */
  1762 #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
  1763 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
  1764 
  1765 /* Ptr to next or previous physical malloc_chunk. */
  1766 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
  1767 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
  1768 
  1769 /* extract next chunk's pinuse bit */
  1770 #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
  1771 
  1772 /* Get/set size at footer */
  1773 #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
  1774 #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
  1775 
  1776 /* Set size, pinuse bit, and foot */
  1777 #define set_size_and_pinuse_of_free_chunk(p, s)\
  1778   ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
  1779 
  1780 /* Set size, pinuse bit, foot, and clear next pinuse */
  1781 #define set_free_with_pinuse(p, s, n)\
  1782   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
  1783 
  1784 #define is_mmapped(p)\
  1785   (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
  1786 
  1787 /* Get the internal overhead associated with chunk p */
  1788 #define overhead_for(p)\
  1789  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
  1790 
  1791 /* Return true if malloced space is not necessarily cleared */
  1792 #if MMAP_CLEARS
  1793 #define calloc_must_clear(p) (!is_mmapped(p))
  1794 #else /* MMAP_CLEARS */
  1795 #define calloc_must_clear(p) (1)
  1796 #endif /* MMAP_CLEARS */
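/*
  Illustrative sketch (not compiled): how the macros above recover a
  chunk header and its state from a pointer handed out to the user.
  The helper name is hypothetical and exists only to show the intended
  reading of the head bits.
*/
#if 0
static size_t example_usable_size(void *mem)
{
    mchunkptr p = mem2chunk(mem);   /* step back from payload to header */
    assert(cinuse(p));              /* an allocated chunk has CINUSE set */
    /* chunksize() strips the two low-order flag bits from head; the
       usable payload is the chunk size minus its per-chunk overhead. */
    return chunksize(p) - overhead_for(p);
}
#endif /* 0 (illustrative only) */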
  1797 
  1798 /* ---------------------- Overlaid data structures ----------------------- */
  1799 
  1800 /*
  1801   When chunks are not in use, they are treated as nodes of either
  1802   lists or trees.
  1803 
  1804   "Small"  chunks are stored in circular doubly-linked lists, and look
  1805   like this:
  1806 
  1807     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1808             |             Size of previous chunk                            |
  1809             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1810     `head:' |             Size of chunk, in bytes                         |P|
  1811       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1812             |             Forward pointer to next chunk in list             |
  1813             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1814             |             Back pointer to previous chunk in list            |
  1815             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1816             |             Unused space (may be 0 bytes long)                .
  1817             .                                                               .
  1818             .                                                               |
  1819 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1820     `foot:' |             Size of chunk, in bytes                           |
  1821             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1822 
  1823   Larger chunks are kept in a form of bitwise digital trees (aka
  1824   tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
  1825   free chunks greater than 256 bytes, their size doesn't impose any
  1826   constraints on user chunk sizes.  Each node looks like:
  1827 
  1828     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1829             |             Size of previous chunk                            |
  1830             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1831     `head:' |             Size of chunk, in bytes                         |P|
  1832       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1833             |             Forward pointer to next chunk of same size        |
  1834             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1835             |             Back pointer to previous chunk of same size       |
  1836             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1837             |             Pointer to left child (child[0])                  |
  1838             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1839             |             Pointer to right child (child[1])                 |
  1840             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1841             |             Pointer to parent                                 |
  1842             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1843             |             bin index of this chunk                           |
  1844             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1845             |             Unused space                                      .
  1846             .                                                               |
  1847 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1848     `foot:' |             Size of chunk, in bytes                           |
  1849             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1850 
  1851   Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
  1852   of the same size are arranged in a circularly-linked list, with only
  1853   the oldest chunk (the next to be used, in our FIFO ordering)
  1854   actually in the tree.  (Tree members are distinguished by a non-null
  1855   parent pointer.)  If a chunk with the same size as an existing node
  1856   is inserted, it is linked off the existing node using pointers that
  1857   work in the same way as fd/bk pointers of small chunks.
  1858 
  1859   Each tree contains a power of 2 sized range of chunk sizes (the
  1860   smallest is 0x100 <= x < 0x180), which is divided in half at each
  1861   tree level, with the chunks in the smaller half of the range (0x100
  1862   <= x < 0x140 for the top node) in the left subtree and the larger
  1863   half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
  1864   done by inspecting individual bits.
  1865 
  1866   Using these rules, each node's left subtree contains all smaller
  1867   sizes than its right subtree.  However, the node at the root of each
  1868   subtree has no particular ordering relationship to either.  (The
  1869   dividing line between the subtree sizes is based on trie relation.)
  1870   If we remove the last chunk of a given size from the interior of the
  1871   tree, we need to replace it with a leaf node.  The tree ordering
  1872   rules permit a node to be replaced by any leaf below it.
  1873 
  1874   The smallest chunk in a tree (a common operation in a best-fit
  1875   allocator) can be found by walking a path to the leftmost leaf in
  1876   the tree.  Unlike a usual binary tree, where we follow left child
  1877   pointers until we reach a null, here we follow the right child
  1878   pointer any time the left one is null, until we reach a leaf with
  1879   both child pointers null. The smallest chunk in the tree will be
  1880   somewhere along that path.
  1881 
  1882   The worst case number of steps to add, find, or remove a node is
  1883   bounded by the number of bits differentiating chunks within
  1884   bins. Under current bin calculations, this ranges from 6 up to 21
  1885   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  1886   is of course much better.
  1887 */
  1888 
  1889 struct malloc_tree_chunk
  1890 {
  1891     /* The first four fields must be compatible with malloc_chunk */
  1892     size_t prev_foot;
  1893     size_t head;
  1894     struct malloc_tree_chunk *fd;
  1895     struct malloc_tree_chunk *bk;
  1896 
  1897     struct malloc_tree_chunk *child[2];
  1898     struct malloc_tree_chunk *parent;
  1899     bindex_t index;
  1900 };
  1901 
  1902 typedef struct malloc_tree_chunk tchunk;
  1903 typedef struct malloc_tree_chunk *tchunkptr;
  1904 typedef struct malloc_tree_chunk *tbinptr;      /* The type of bins of trees */
  1905 
  1906 /* A little helper macro for trees */
  1907 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
  1908 
  1909 /* ----------------------------- Segments -------------------------------- */
  1910 
  1911 /*
  1912   Each malloc space may include non-contiguous segments, held in a
  1913   list headed by an embedded malloc_segment record representing the
  1914   top-most space. Segments also include flags holding properties of
  1915   the space. Large chunks that are directly allocated by mmap are not
  1916   included in this list. They are instead independently created and
  1917   destroyed without otherwise keeping track of them.
  1918 
  1919   Segment management mainly comes into play for spaces allocated by
  1920   MMAP.  Any call to MMAP might or might not return memory that is
  1921   adjacent to an existing segment.  MORECORE normally contiguously
  1922   extends the current space, so this space is almost always adjacent,
  1923   which is simpler and faster to deal with. (This is why MORECORE is
  1924   used preferentially to MMAP when both are available -- see
  1925   sys_alloc.)  When allocating using MMAP, we don't use any of the
  1926   hinting mechanisms (inconsistently) supported in various
  1927   implementations of unix mmap, or distinguish reserving from
  1928   committing memory. Instead, we just ask for space, and exploit
  1929   contiguity when we get it.  It is probably possible to do
  1930   better than this on some systems, but no general scheme seems
  1931   to be significantly better.
  1932 
  1933   Management entails a simpler variant of the consolidation scheme
  1934   used for chunks to reduce fragmentation -- new adjacent memory is
  1935   normally prepended or appended to an existing segment. However,
  1936   there are limitations compared to chunk consolidation that mostly
  1937   reflect the fact that segment processing is relatively infrequent
  1938   (occurring only when getting memory from system) and that we
  1939   don't expect to have huge numbers of segments:
  1940 
  1941   * Segments are not indexed, so traversal requires linear scans.  (It
  1942     would be possible to index these, but is not worth the extra
  1943     overhead and complexity for most programs on most platforms.)
  1944   * New segments are only appended to old ones when holding top-most
  1945     memory; if they cannot be prepended to others, they are held in
  1946     different segments.
  1947 
  1948   Except for the top-most segment of an mstate, each segment record
  1949   is kept at the tail of its segment. Segments are added by pushing
  1950   segment records onto the list headed by &mstate.seg for the
  1951   containing mstate.
  1952 
  1953   Segment flags control allocation/merge/deallocation policies:
  1954   * If EXTERN_BIT set, then we did not allocate this segment,
  1955     and so should not try to deallocate or merge with others.
  1956     (This currently holds only for the initial segment passed
  1957     into create_mspace_with_base.)
  1958   * If IS_MMAPPED_BIT set, the segment may be merged with
  1959     other surrounding mmapped segments and trimmed/de-allocated
  1960     using munmap.
  1961   * If neither bit is set, then the segment was obtained using
  1962     MORECORE so can be merged with surrounding MORECORE'd segments
  1963     and deallocated/trimmed using MORECORE with negative arguments.
  1964 */
  1965 
  1966 struct malloc_segment
  1967 {
  1968     char *base;                 /* base address */
  1969     size_t size;                /* allocated size */
  1970     struct malloc_segment *next;        /* ptr to next segment */
  1971     flag_t sflags;              /* mmap and extern flag */
  1972 };
  1973 
  1974 #define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
  1975 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
  1976 
  1977 typedef struct malloc_segment msegment;
  1978 typedef struct malloc_segment *msegmentptr;
  1979 
  1980 /* ---------------------------- malloc_state ----------------------------- */
  1981 
  1982 /*
  1983    A malloc_state holds all of the bookkeeping for a space.
  1984    The main fields are:
  1985 
  1986   Top
  1987     The topmost chunk of the currently active segment. Its size is
  1988     cached in topsize.  The actual size of topmost space is
  1989     topsize+TOP_FOOT_SIZE, which includes space reserved for adding
  1990     fenceposts and segment records if necessary when getting more
  1991     space from the system.  The size at which to autotrim top is
  1992     cached from mparams in trim_check, except that it is disabled if
  1993     an autotrim fails.
  1994 
  1995   Designated victim (dv)
  1996     This is the preferred chunk for servicing small requests that
  1997     don't have exact fits.  It is normally the chunk split off most
  1998     recently to service another small request.  Its size is cached in
  1999     dvsize. The link fields of this chunk are not maintained since it
  2000     is not kept in a bin.
  2001 
  2002   SmallBins
  2003     An array of bin headers for free chunks.  These bins hold chunks
  2004     with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
  2005     chunks of all the same size, spaced 8 bytes apart.  To simplify
  2006     use in double-linked lists, each bin header acts as a malloc_chunk
  2007     pointing to the real first node, if it exists (else pointing to
  2008     itself).  This avoids special-casing for headers.  But to avoid
  2009     waste, we allocate only the fd/bk pointers of bins, and then use
  2010     repositioning tricks to treat these as the fields of a chunk.
  2011 
  2012   TreeBins
  2013     Treebins are pointers to the roots of trees holding a range of
  2014     sizes. There are 2 equally spaced treebins for each power of two
  2015     from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
  2016     larger.
  2017 
  2018   Bin maps
  2019     There is one bit map for small bins ("smallmap") and one for
  2020     treebins ("treemap").  Each bin sets its bit when non-empty, and
  2021     clears the bit when empty.  Bit operations are then used to avoid
  2022     bin-by-bin searching -- nearly all "search" is done without ever
  2023     looking at bins that won't be selected.  The bit maps
  2024     conservatively use 32 bits per map word, even on a 64-bit system.
  2025     For a good description of some of the bit-based techniques used
  2026     here, see Henry S. Warren Jr's book "Hacker's Delight" (and
  2027     supplement at http://hackersdelight.org/). Many of these are
  2028     intended to reduce the branchiness of paths through malloc etc, as
  2029     well as to reduce the number of memory locations read or written.
  2030 
  2031   Segments
  2032     A list of segments headed by an embedded malloc_segment record
  2033     representing the initial space.
  2034 
  2035   Address check support
  2036     The least_addr field is the least address ever obtained from
  2037     MORECORE or MMAP. Attempted frees and reallocs of any address less
  2038     than this are trapped (unless INSECURE is defined).
  2039 
  2040   Magic tag
  2041     A cross-check field that should always hold the same value as mparams.magic.
  2042 
  2043   Flags
  2044     Bits recording whether to use MMAP, locks, or contiguous MORECORE
  2045 
  2046   Statistics
  2047     Each space keeps track of current and maximum system memory
  2048     obtained via MORECORE or MMAP.
  2049 
  2050   Locking
  2051     If USE_LOCKS is defined, the "mutex" lock is acquired and released
  2052     around every public call using this mspace.
  2053 */
  2054 
  2055 /* Bin types, widths and sizes */
  2056 #define NSMALLBINS        (32U)
  2057 #define NTREEBINS         (32U)
  2058 #define SMALLBIN_SHIFT    (3U)
  2059 #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
  2060 #define TREEBIN_SHIFT     (8U)
  2061 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
  2062 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
  2063 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
  2064 
  2065 struct malloc_state
  2066 {
  2067     binmap_t smallmap;
  2068     binmap_t treemap;
  2069     size_t dvsize;
  2070     size_t topsize;
  2071     char *least_addr;
  2072     mchunkptr dv;
  2073     mchunkptr top;
  2074     size_t trim_check;
  2075     size_t magic;
  2076     mchunkptr smallbins[(NSMALLBINS + 1) * 2];
  2077     tbinptr treebins[NTREEBINS];
  2078     size_t footprint;
  2079     size_t max_footprint;
  2080     flag_t mflags;
  2081 #if USE_LOCKS
  2082     MLOCK_T mutex;              /* locate lock among fields that rarely change */
  2083 #endif                          /* USE_LOCKS */
  2084     msegment seg;
  2085 };
  2086 
  2087 typedef struct malloc_state *mstate;
  2088 
  2089 /* ------------- Global malloc_state and malloc_params ------------------- */
  2090 
  2091 /*
  2092   malloc_params holds global properties, including those that can be
  2093   dynamically set using mallopt. There is a single instance, mparams,
  2094   initialized in init_mparams.
  2095 */
  2096 
  2097 struct malloc_params
  2098 {
  2099     size_t magic;
  2100     size_t page_size;
  2101     size_t granularity;
  2102     size_t mmap_threshold;
  2103     size_t trim_threshold;
  2104     flag_t default_mflags;
  2105 };
  2106 
  2107 static struct malloc_params mparams;
  2108 
  2109 /* The global malloc_state used for all non-"mspace" calls */
  2110 static struct malloc_state _gm_;
  2111 #define gm                 (&_gm_)
  2112 #define is_global(M)       ((M) == &_gm_)
  2113 #define is_initialized(M)  ((M)->top != 0)
  2114 
  2115 /* -------------------------- system alloc setup ------------------------- */
  2116 
  2117 /* Operations on mflags */
  2118 
  2119 #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
  2120 #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
  2121 #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
  2122 
  2123 #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
  2124 #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
  2125 #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
  2126 
  2127 #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
  2128 #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
  2129 
  2130 #define set_lock(M,L)\
  2131  ((M)->mflags = (L)?\
  2132   ((M)->mflags | USE_LOCK_BIT) :\
  2133   ((M)->mflags & ~USE_LOCK_BIT))
  2134 
  2135 /* page-align a size */
  2136 #define page_align(S)\
  2137  (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
  2138 
  2139 /* granularity-align a size */
  2140 #define granularity_align(S)\
  2141   (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
  2142 
  2143 #define is_page_aligned(S)\
  2144    (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
  2145 #define is_granularity_aligned(S)\
  2146    (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
  2147 
  2148 /*  True if segment S holds address A */
  2149 #define segment_holds(S, A)\
  2150   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
  2151 
  2152 /* Return segment holding given address */
  2153 static msegmentptr
  2154 segment_holding(mstate m, char *addr)
  2155 {
  2156     msegmentptr sp = &m->seg;
  2157     for (;;) {
  2158         if (addr >= sp->base && addr < sp->base + sp->size)
  2159             return sp;
  2160         if ((sp = sp->next) == 0)
  2161             return 0;
  2162     }
  2163 }
  2164 
  2165 /* Return true if segment contains a segment link */
  2166 static int
  2167 has_segment_link(mstate m, msegmentptr ss)
  2168 {
  2169     msegmentptr sp = &m->seg;
  2170     for (;;) {
  2171         if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
  2172             return 1;
  2173         if ((sp = sp->next) == 0)
  2174             return 0;
  2175     }
  2176 }
  2177 
  2178 #ifndef MORECORE_CANNOT_TRIM
  2179 #define should_trim(M,s)  ((s) > (M)->trim_check)
  2180 #else /* MORECORE_CANNOT_TRIM */
  2181 #define should_trim(M,s)  (0)
  2182 #endif /* MORECORE_CANNOT_TRIM */
  2183 
  2184 /*
  2185   TOP_FOOT_SIZE is padding at the end of a segment, including space
  2186   that may be needed to place segment records and fenceposts when new
  2187   noncontiguous segments are added.
  2188 */
  2189 #define TOP_FOOT_SIZE\
  2190   (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
  2191 
  2192 
  2193 /* -------------------------------  Hooks -------------------------------- */
  2194 
  2195 /*
  2196   PREACTION should be defined to return 0 on success, and nonzero on
  2197   failure. If you are not using locking, you can redefine these to do
  2198   anything you like.
  2199 */
  2200 
  2201 #if USE_LOCKS
  2202 
  2203 /* Ensure locks are initialized */
  2204 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
  2205 
  2206 #define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
  2207 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
  2208 #else /* USE_LOCKS */
  2209 
  2210 #ifndef PREACTION
  2211 #define PREACTION(M) (0)
  2212 #endif /* PREACTION */
  2213 
  2214 #ifndef POSTACTION
  2215 #define POSTACTION(M)
  2216 #endif /* POSTACTION */
  2217 
  2218 #endif /* USE_LOCKS */
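/*
  Illustrative sketch (not compiled): the locking pattern the public
  routines below follow.  PREACTION returns zero on success (acquiring
  the state lock when locking is in use, and performing one-time global
  initialization under USE_LOCKS) and nonzero on failure; POSTACTION
  releases the lock if one was taken.  The function name is hypothetical.
*/
#if 0
static void *example_locked_operation(mstate m, size_t bytes)
{
    void *mem = 0;
    (void) bytes;
    if (!PREACTION(m)) {
        /* ... allocate from m here ... */
        POSTACTION(m);
    }
    return mem;
}
#endif /* 0 (illustrative only) */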
  2219 
  2220 /*
  2221   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  2222   USAGE_ERROR_ACTION is triggered on detected bad frees and
  2223   reallocs. The argument p is an address that might have triggered the
  2224   fault. It is ignored by the two predefined actions, but might be
  2225   useful in custom actions that try to help diagnose errors.
  2226 */
  2227 
  2228 #if PROCEED_ON_ERROR
  2229 
  2230 /* A count of the number of corruption errors causing resets */
  2231 int malloc_corruption_error_count;
  2232 
  2233 /* default corruption action */
  2234 static void reset_on_error(mstate m);
  2235 
  2236 #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
  2237 #define USAGE_ERROR_ACTION(m, p)
  2238 
  2239 #else /* PROCEED_ON_ERROR */
  2240 
  2241 #ifndef CORRUPTION_ERROR_ACTION
  2242 #define CORRUPTION_ERROR_ACTION(m) ABORT
  2243 #endif /* CORRUPTION_ERROR_ACTION */
  2244 
  2245 #ifndef USAGE_ERROR_ACTION
  2246 #define USAGE_ERROR_ACTION(m,p) ABORT
  2247 #endif /* USAGE_ERROR_ACTION */
  2248 
  2249 #endif /* PROCEED_ON_ERROR */
  2250 
  2251 /* -------------------------- Debugging setup ---------------------------- */
  2252 
  2253 #if ! DEBUG
  2254 
  2255 #define check_free_chunk(M,P)
  2256 #define check_inuse_chunk(M,P)
  2257 #define check_malloced_chunk(M,P,N)
  2258 #define check_mmapped_chunk(M,P)
  2259 #define check_malloc_state(M)
  2260 #define check_top_chunk(M,P)
  2261 
  2262 #else /* DEBUG */
  2263 #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
  2264 #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
  2265 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
  2266 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
  2267 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
  2268 #define check_malloc_state(M)       do_check_malloc_state(M)
  2269 
  2270 static void do_check_any_chunk(mstate m, mchunkptr p);
  2271 static void do_check_top_chunk(mstate m, mchunkptr p);
  2272 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
  2273 static void do_check_inuse_chunk(mstate m, mchunkptr p);
  2274 static void do_check_free_chunk(mstate m, mchunkptr p);
  2275 static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
  2276 static void do_check_tree(mstate m, tchunkptr t);
  2277 static void do_check_treebin(mstate m, bindex_t i);
  2278 static void do_check_smallbin(mstate m, bindex_t i);
  2279 static void do_check_malloc_state(mstate m);
  2280 static int bin_find(mstate m, mchunkptr x);
  2281 static size_t traverse_and_check(mstate m);
  2282 #endif /* DEBUG */
  2283 
  2284 /* ---------------------------- Indexing Bins ---------------------------- */
  2285 
  2286 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
  2287 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
  2288 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
  2289 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
  2290 
  2291 /* addressing by index. See above about smallbin repositioning */
  2292 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
  2293 #define treebin_at(M,i)     (&((M)->treebins[i]))
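/*
  Worked example: with SMALLBIN_SHIFT == 3, a padded chunk size of 64
  bytes is "small" (64 >> 3 == 8, which is < NSMALLBINS), lives in
  smallbin index 8, and small_index2size(8) recovers 64.  smallbin_at(M, 8)
  then yields that bin's header, using the repositioning trick described
  under "SmallBins" above.
*/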
  2294 
  2295 /* assign tree index for size S to variable I */
  2296 #if defined(__GNUC__) && defined(i386)
  2297 #define compute_tree_index(S, I)\
  2298 {\
  2299   size_t X = S >> TREEBIN_SHIFT;\
  2300   if (X == 0)\
  2301     I = 0;\
  2302   else if (X > 0xFFFF)\
  2303     I = NTREEBINS-1;\
  2304   else {\
  2305     unsigned int K;\
  2306     __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
  2307     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  2308   }\
  2309 }
  2310 #else /* GNUC */
  2311 #define compute_tree_index(S, I)\
  2312 {\
  2313   size_t X = S >> TREEBIN_SHIFT;\
  2314   if (X == 0)\
  2315     I = 0;\
  2316   else if (X > 0xFFFF)\
  2317     I = NTREEBINS-1;\
  2318   else {\
  2319     unsigned int Y = (unsigned int)X;\
  2320     unsigned int N = ((Y - 0x100) >> 16) & 8;\
  2321     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
  2322     N += K;\
  2323     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
  2324     K = 14 - N + ((Y <<= K) >> 15);\
  2325     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  2326   }\
  2327 }
  2328 #endif /* GNUC */
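/*
  Illustrative sketch (not compiled): compute_tree_index assigns a
  treebin index to a non-small chunk size.  With TREEBIN_SHIFT == 8,
  sizes in [0x100, 0x180) should land in treebin 0 and sizes in
  [0x180, 0x200) in treebin 1, matching minsize_for_tree_index below.
  The function name is hypothetical.
*/
#if 0
static bindex_t example_treebin_for(size_t size)
{
    bindex_t idx;
    compute_tree_index(size, idx);  /* e.g. size 0x180 is expected to map to 1 */
    return idx;
}
#endif /* 0 (illustrative only) */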
  2329 
  2330 /* Bit representing maximum resolved size in a treebin at i */
  2331 #define bit_for_tree_index(i) \
  2332    (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
  2333 
  2334 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
  2335 #define leftshift_for_tree_index(i) \
  2336    ((i == NTREEBINS-1)? 0 : \
  2337     ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
  2338 
  2339 /* The size of the smallest chunk held in bin with index i */
  2340 #define minsize_for_tree_index(i) \
  2341    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
  2342    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
  2343 
  2344 
  2345 /* ------------------------ Operations on bin maps ----------------------- */
  2346 
  2347 /* bit corresponding to given index */
  2348 #define idx2bit(i)              ((binmap_t)(1) << (i))
  2349 
  2350 /* Mark/Clear bits with given index */
  2351 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
  2352 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
  2353 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
  2354 
  2355 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
  2356 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
  2357 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
  2358 
  2359 /* index corresponding to given bit */
  2360 
  2361 #if defined(__GNUC__) && defined(i386)
  2362 #define compute_bit2idx(X, I)\
  2363 {\
  2364   unsigned int J;\
  2365   __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  2366   I = (bindex_t)J;\
  2367 }
  2368 
  2369 #else /* GNUC */
  2370 #if  USE_BUILTIN_FFS
  2371 #define compute_bit2idx(X, I) I = ffs(X)-1
  2372 
  2373 #else /* USE_BUILTIN_FFS */
  2374 #define compute_bit2idx(X, I)\
  2375 {\
  2376   unsigned int Y = X - 1;\
  2377   unsigned int K = Y >> (16-4) & 16;\
  2378   unsigned int N = K;        Y >>= K;\
  2379   N += K = Y >> (8-3) &  8;  Y >>= K;\
  2380   N += K = Y >> (4-2) &  4;  Y >>= K;\
  2381   N += K = Y >> (2-1) &  2;  Y >>= K;\
  2382   N += K = Y >> (1-0) &  1;  Y >>= K;\
  2383   I = (bindex_t)(N + Y);\
  2384 }
  2385 #endif /* USE_BUILTIN_FFS */
  2386 #endif /* GNUC */
  2387 
  2388 /* isolate the least set bit of a bitmap */
  2389 #define least_bit(x)         ((x) & -(x))
  2390 
  2391 /* mask with all bits to left of least bit of x on */
  2392 #define left_bits(x)         ((x<<1) | -(x<<1))
  2393 
  2394 /* mask with all bits to left of or equal to least bit of x on */
  2395 #define same_or_left_bits(x) ((x) | -(x))
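/*
  Illustrative sketch (not compiled): how the bin-map macros combine to
  locate the first non-empty small bin at or above index i without
  scanning bin by bin.  The helper name is hypothetical.
*/
#if 0
static bindex_t example_next_nonempty_smallbin(mstate m, bindex_t i)
{
    binmap_t candidates = same_or_left_bits(idx2bit(i)) & m->smallmap;
    binmap_t bit = least_bit(candidates);   /* lowest qualifying bin, if any */
    bindex_t idx = 0;
    if (bit != 0)
        compute_bit2idx(bit, idx);
    return idx;
}
#endif /* 0 (illustrative only) */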
  2396 
  2397 
  2398 /* ----------------------- Runtime Check Support ------------------------- */
  2399 
  2400 /*
  2401   For security, the main invariant is that malloc/free/etc never
  2402   writes to a static address other than malloc_state, unless static
  2403   malloc_state itself has been corrupted, which cannot occur via
  2404   malloc (because of these checks). In essence this means that we
  2405   believe all pointers, sizes, maps etc held in malloc_state, but
  2406   check all of those linked or offsetted from other embedded data
  2407   structures.  These checks are interspersed with main code in a way
  2408   that tends to minimize their run-time cost.
  2409 
  2410   When FOOTERS is defined, in addition to range checking, we also
  2411   verify footer fields of inuse chunks, which can be used to guarantee
  2412   that the mstate controlling malloc/free is intact.  This is a
  2413   streamlined version of the approach described by William Robertson
  2414   et al in "Run-time Detection of Heap-based Overflows" LISA'03
  2415   http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  2416   of an inuse chunk holds the xor of its mstate and a random seed,
  2417   that is checked upon calls to free() and realloc().  This is
   2418   (probabilistically) unguessable from outside the program, but can be
  2419   computed by any code successfully malloc'ing any chunk, so does not
  2420   itself provide protection against code that has already broken
  2421   security through some other means.  Unlike Robertson et al, we
  2422   always dynamically check addresses of all offset chunks (previous,
  2423   next, etc). This turns out to be cheaper than relying on hashes.
  2424 */
  2425 
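        /*
          A minimal sketch of the footer check when FOOTERS is defined (the
          macros appear further below): an inuse chunk p of size s stores
          ((size_t)m ^ mparams.magic) in the prev_foot field of the chunk at
          (char*)p + s.  Because xor is its own inverse,

              mstate m2 = (mstate)(footer ^ mparams.magic);   // == m if intact

          and ok_magic(m2) rejects the chunk if either the footer or the
          recovered state has been clobbered.
        */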
  2426 #if !INSECURE
  2427 /* Check if address a is at least as high as any from MORECORE or MMAP */
  2428 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
  2429 /* Check if address of next chunk n is higher than base chunk p */
  2430 #define ok_next(p, n)    ((char*)(p) < (char*)(n))
  2431 /* Check if p has its cinuse bit on */
  2432 #define ok_cinuse(p)     cinuse(p)
  2433 /* Check if p has its pinuse bit on */
  2434 #define ok_pinuse(p)     pinuse(p)
  2435 
  2436 #else /* !INSECURE */
  2437 #define ok_address(M, a) (1)
  2438 #define ok_next(b, n)    (1)
  2439 #define ok_cinuse(p)     (1)
  2440 #define ok_pinuse(p)     (1)
  2441 #endif /* !INSECURE */
  2442 
  2443 #if (FOOTERS && !INSECURE)
  2444 /* Check if (alleged) mstate m has expected magic field */
  2445 #define ok_magic(M)      ((M)->magic == mparams.magic)
  2446 #else /* (FOOTERS && !INSECURE) */
  2447 #define ok_magic(M)      (1)
  2448 #endif /* (FOOTERS && !INSECURE) */
  2449 
  2450 
  2451 /* In gcc, use __builtin_expect to minimize impact of checks */
  2452 #if !INSECURE
  2453 #if defined(__GNUC__) && __GNUC__ >= 3
  2454 #define RTCHECK(e)  __builtin_expect(e, 1)
  2455 #else /* GNUC */
  2456 #define RTCHECK(e)  (e)
  2457 #endif /* GNUC */
  2458 #else /* !INSECURE */
  2459 #define RTCHECK(e)  (1)
  2460 #endif /* !INSECURE */
  2461 
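        /*
          The typical shape of these checks in the code below (a usage sketch,
          not a new interface): the ok_* predicates are wrapped in RTCHECK so
          that the corruption path is treated as unlikely, e.g.

              if (RTCHECK(ok_address(M, F))) {
                  ... normal unlink path ...
              }
              else {
                  CORRUPTION_ERROR_ACTION(M);
              }
        */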
  2462 /* macros to set up inuse chunks with or without footers */
  2463 
  2464 #if !FOOTERS
  2465 
  2466 #define mark_inuse_foot(M,p,s)
  2467 
  2468 /* Set cinuse bit and pinuse bit of next chunk */
  2469 #define set_inuse(M,p,s)\
  2470   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2471   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2472 
  2473 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
  2474 #define set_inuse_and_pinuse(M,p,s)\
  2475   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2476   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2477 
  2478 /* Set size, cinuse and pinuse bit of this chunk */
  2479 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2480   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
  2481 
  2482 #else /* FOOTERS */
  2483 
  2484 /* Set foot of inuse chunk to be xor of mstate and seed */
  2485 #define mark_inuse_foot(M,p,s)\
  2486   (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
  2487 
  2488 #define get_mstate_for(p)\
  2489   ((mstate)(((mchunkptr)((char*)(p) +\
  2490     (chunksize(p))))->prev_foot ^ mparams.magic))
  2491 
  2492 #define set_inuse(M,p,s)\
  2493   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2494   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  2495   mark_inuse_foot(M,p,s))
  2496 
  2497 #define set_inuse_and_pinuse(M,p,s)\
  2498   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2499   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  2500  mark_inuse_foot(M,p,s))
  2501 
  2502 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2503   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2504   mark_inuse_foot(M, p, s))
  2505 
  2506 #endif /* !FOOTERS */
  2507 
  2508 /* ---------------------------- setting mparams -------------------------- */
  2509 
  2510 /* Initialize mparams */
  2511 static int
  2512 init_mparams(void)
  2513 {
  2514     if (mparams.page_size == 0) {
  2515         size_t s;
  2516 
  2517         mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  2518         mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  2519 #if MORECORE_CONTIGUOUS
  2520         mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
  2521 #else /* MORECORE_CONTIGUOUS */
  2522         mparams.default_mflags =
  2523             USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
  2524 #endif /* MORECORE_CONTIGUOUS */
  2525 
  2526 #if (FOOTERS && !INSECURE)
  2527         {
  2528 #if USE_DEV_RANDOM
  2529             int fd;
  2530             unsigned char buf[sizeof(size_t)];
  2531             /* Try to use /dev/urandom, else fall back on using time */
  2532             if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
  2533                 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
  2534                 s = *((size_t *) buf);
  2535                 close(fd);
  2536             } else
  2537 #endif /* USE_DEV_RANDOM */
  2538                 s = (size_t) (time(0) ^ (size_t) 0x55555555U);
  2539 
  2540             s |= (size_t) 8U;   /* ensure nonzero */
  2541             s &= ~(size_t) 7U;  /* improve chances of fault for bad values */
  2542 
  2543         }
  2544 #else /* (FOOTERS && !INSECURE) */
  2545         s = (size_t) 0x58585858U;
  2546 #endif /* (FOOTERS && !INSECURE) */
  2547         ACQUIRE_MAGIC_INIT_LOCK();
  2548         if (mparams.magic == 0) {
  2549             mparams.magic = s;
  2550             /* Set up lock for main malloc area */
  2551             INITIAL_LOCK(&gm->mutex);
  2552             gm->mflags = mparams.default_mflags;
  2553         }
  2554         RELEASE_MAGIC_INIT_LOCK();
  2555 
  2556 #ifndef WIN32
  2557         mparams.page_size = malloc_getpagesize;
  2558         mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
  2559                                DEFAULT_GRANULARITY : mparams.page_size);
  2560 #else /* WIN32 */
  2561         {
  2562             SYSTEM_INFO system_info;
  2563             GetSystemInfo(&system_info);
  2564             mparams.page_size = system_info.dwPageSize;
  2565             mparams.granularity = system_info.dwAllocationGranularity;
  2566         }
  2567 #endif /* WIN32 */
  2568 
  2569         /* Sanity-check configuration:
  2570            size_t must be unsigned and as wide as pointer type.
  2571            ints must be at least 4 bytes.
  2572            alignment must be at least 8.
  2573            Alignment, min chunk size, and page size must all be powers of 2.
  2574          */
  2575         if ((sizeof(size_t) != sizeof(char *)) ||
  2576             (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
  2577             (sizeof(int) < 4) ||
  2578             (MALLOC_ALIGNMENT < (size_t) 8U) ||
  2579             ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
  2580             ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
  2581             ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
  2582             || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
  2583             ABORT;
  2584     }
  2585     return 0;
  2586 }
  2587 
  2588 /* support for mallopt */
  2589 static int
  2590 change_mparam(int param_number, int value)
  2591 {
  2592     size_t val = (size_t) value;
  2593     init_mparams();
  2594     switch (param_number) {
  2595     case M_TRIM_THRESHOLD:
  2596         mparams.trim_threshold = val;
  2597         return 1;
  2598     case M_GRANULARITY:
  2599         if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
  2600             mparams.granularity = val;
  2601             return 1;
  2602         } else
  2603             return 0;
  2604     case M_MMAP_THRESHOLD:
  2605         mparams.mmap_threshold = val;
  2606         return 1;
  2607     default:
  2608         return 0;
  2609     }
  2610 }
  2611 
  2612 #if DEBUG
  2613 /* ------------------------- Debugging Support --------------------------- */
  2614 
  2615 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
  2616 static void
  2617 do_check_any_chunk(mstate m, mchunkptr p)
  2618 {
  2619     assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2620     assert(ok_address(m, p));
  2621 }
  2622 
  2623 /* Check properties of top chunk */
  2624 static void
  2625 do_check_top_chunk(mstate m, mchunkptr p)
  2626 {
  2627     msegmentptr sp = segment_holding(m, (char *) p);
  2628     size_t sz = chunksize(p);
  2629     assert(sp != 0);
  2630     assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2631     assert(ok_address(m, p));
  2632     assert(sz == m->topsize);
  2633     assert(sz > 0);
  2634     assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
  2635     assert(pinuse(p));
  2636     assert(!next_pinuse(p));
  2637 }
  2638 
  2639 /* Check properties of (inuse) mmapped chunks */
  2640 static void
  2641 do_check_mmapped_chunk(mstate m, mchunkptr p)
  2642 {
  2643     size_t sz = chunksize(p);
  2644     size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  2645     assert(is_mmapped(p));
  2646     assert(use_mmap(m));
  2647     assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2648     assert(ok_address(m, p));
  2649     assert(!is_small(sz));
  2650     assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
  2651     assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  2652     assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
  2653 }
  2654 
  2655 /* Check properties of inuse chunks */
  2656 static void
  2657 do_check_inuse_chunk(mstate m, mchunkptr p)
  2658 {
  2659     do_check_any_chunk(m, p);
  2660     assert(cinuse(p));
  2661     assert(next_pinuse(p));
  2662     /* If not pinuse and not mmapped, previous chunk has OK offset */
  2663     assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  2664     if (is_mmapped(p))
  2665         do_check_mmapped_chunk(m, p);
  2666 }
  2667 
  2668 /* Check properties of free chunks */
  2669 static void
  2670 do_check_free_chunk(mstate m, mchunkptr p)
  2671 {
  2672     size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
  2673     mchunkptr next = chunk_plus_offset(p, sz);
  2674     do_check_any_chunk(m, p);
  2675     assert(!cinuse(p));
  2676     assert(!next_pinuse(p));
  2677     assert(!is_mmapped(p));
  2678     if (p != m->dv && p != m->top) {
  2679         if (sz >= MIN_CHUNK_SIZE) {
  2680             assert((sz & CHUNK_ALIGN_MASK) == 0);
  2681             assert(is_aligned(chunk2mem(p)));
  2682             assert(next->prev_foot == sz);
  2683             assert(pinuse(p));
  2684             assert(next == m->top || cinuse(next));
  2685             assert(p->fd->bk == p);
  2686             assert(p->bk->fd == p);
  2687         } else                  /* markers are always of size SIZE_T_SIZE */
  2688             assert(sz == SIZE_T_SIZE);
  2689     }
  2690 }
  2691 
  2692 /* Check properties of malloced chunks at the point they are malloced */
  2693 static void
  2694 do_check_malloced_chunk(mstate m, void *mem, size_t s)
  2695 {
  2696     if (mem != 0) {
  2697         mchunkptr p = mem2chunk(mem);
  2698         size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
  2699         do_check_inuse_chunk(m, p);
  2700         assert((sz & CHUNK_ALIGN_MASK) == 0);
  2701         assert(sz >= MIN_CHUNK_SIZE);
  2702         assert(sz >= s);
  2703         /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
  2704         assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  2705     }
  2706 }
  2707 
  2708 /* Check a tree and its subtrees.  */
  2709 static void
  2710 do_check_tree(mstate m, tchunkptr t)
  2711 {
  2712     tchunkptr head = 0;
  2713     tchunkptr u = t;
  2714     bindex_t tindex = t->index;
  2715     size_t tsize = chunksize(t);
  2716     bindex_t idx;
  2717     compute_tree_index(tsize, idx);
  2718     assert(tindex == idx);
  2719     assert(tsize >= MIN_LARGE_SIZE);
  2720     assert(tsize >= minsize_for_tree_index(idx));
  2721     assert((idx == NTREEBINS - 1)
  2722            || (tsize < minsize_for_tree_index((idx + 1))));
  2723 
  2724     do {                        /* traverse through chain of same-sized nodes */
  2725         do_check_any_chunk(m, ((mchunkptr) u));
  2726         assert(u->index == tindex);
  2727         assert(chunksize(u) == tsize);
  2728         assert(!cinuse(u));
  2729         assert(!next_pinuse(u));
  2730         assert(u->fd->bk == u);
  2731         assert(u->bk->fd == u);
  2732         if (u->parent == 0) {
  2733             assert(u->child[0] == 0);
  2734             assert(u->child[1] == 0);
  2735         } else {
  2736             assert(head == 0);  /* only one node on chain has parent */
  2737             head = u;
  2738             assert(u->parent != u);
  2739             assert(u->parent->child[0] == u ||
  2740                    u->parent->child[1] == u ||
  2741                    *((tbinptr *) (u->parent)) == u);
  2742             if (u->child[0] != 0) {
  2743                 assert(u->child[0]->parent == u);
  2744                 assert(u->child[0] != u);
  2745                 do_check_tree(m, u->child[0]);
  2746             }
  2747             if (u->child[1] != 0) {
  2748                 assert(u->child[1]->parent == u);
  2749                 assert(u->child[1] != u);
  2750                 do_check_tree(m, u->child[1]);
  2751             }
  2752             if (u->child[0] != 0 && u->child[1] != 0) {
  2753                 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
  2754             }
  2755         }
  2756         u = u->fd;
  2757     } while (u != t);
  2758     assert(head != 0);
  2759 }
  2760 
  2761 /*  Check all the chunks in a treebin.  */
  2762 static void
  2763 do_check_treebin(mstate m, bindex_t i)
  2764 {
  2765     tbinptr *tb = treebin_at(m, i);
  2766     tchunkptr t = *tb;
  2767     int empty = (m->treemap & (1U << i)) == 0;
  2768     if (t == 0)
  2769         assert(empty);
  2770     if (!empty)
  2771         do_check_tree(m, t);
  2772 }
  2773 
  2774 /*  Check all the chunks in a smallbin.  */
  2775 static void
  2776 do_check_smallbin(mstate m, bindex_t i)
  2777 {
  2778     sbinptr b = smallbin_at(m, i);
  2779     mchunkptr p = b->bk;
  2780     unsigned int empty = (m->smallmap & (1U << i)) == 0;
  2781     if (p == b)
  2782         assert(empty);
  2783     if (!empty) {
  2784         for (; p != b; p = p->bk) {
  2785             size_t size = chunksize(p);
  2786             mchunkptr q;
  2787             /* each chunk claims to be free */
  2788             do_check_free_chunk(m, p);
  2789             /* chunk belongs in bin */
  2790             assert(small_index(size) == i);
  2791             assert(p->bk == b || chunksize(p->bk) == chunksize(p));
  2792             /* chunk is followed by an inuse chunk */
  2793             q = next_chunk(p);
  2794             if (q->head != FENCEPOST_HEAD)
  2795                 do_check_inuse_chunk(m, q);
  2796         }
  2797     }
  2798 }
  2799 
  2800 /* Find x in a bin. Used in other check functions. */
  2801 static int
  2802 bin_find(mstate m, mchunkptr x)
  2803 {
  2804     size_t size = chunksize(x);
  2805     if (is_small(size)) {
  2806         bindex_t sidx = small_index(size);
  2807         sbinptr b = smallbin_at(m, sidx);
  2808         if (smallmap_is_marked(m, sidx)) {
  2809             mchunkptr p = b;
  2810             do {
  2811                 if (p == x)
  2812                     return 1;
  2813             } while ((p = p->fd) != b);
  2814         }
  2815     } else {
  2816         bindex_t tidx;
  2817         compute_tree_index(size, tidx);
  2818         if (treemap_is_marked(m, tidx)) {
  2819             tchunkptr t = *treebin_at(m, tidx);
  2820             size_t sizebits = size << leftshift_for_tree_index(tidx);
  2821             while (t != 0 && chunksize(t) != size) {
  2822                 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
  2823                 sizebits <<= 1;
  2824             }
  2825             if (t != 0) {
  2826                 tchunkptr u = t;
  2827                 do {
  2828                     if (u == (tchunkptr) x)
  2829                         return 1;
  2830                 } while ((u = u->fd) != t);
  2831             }
  2832         }
  2833     }
  2834     return 0;
  2835 }
  2836 
  2837 /* Traverse each chunk and check it; return total */
  2838 static size_t
  2839 traverse_and_check(mstate m)
  2840 {
  2841     size_t sum = 0;
  2842     if (is_initialized(m)) {
  2843         msegmentptr s = &m->seg;
  2844         sum += m->topsize + TOP_FOOT_SIZE;
  2845         while (s != 0) {
  2846             mchunkptr q = align_as_chunk(s->base);
  2847             mchunkptr lastq = 0;
  2848             assert(pinuse(q));
  2849             while (segment_holds(s, q) &&
  2850                    q != m->top && q->head != FENCEPOST_HEAD) {
  2851                 sum += chunksize(q);
  2852                 if (cinuse(q)) {
  2853                     assert(!bin_find(m, q));
  2854                     do_check_inuse_chunk(m, q);
  2855                 } else {
  2856                     assert(q == m->dv || bin_find(m, q));
  2857                     assert(lastq == 0 || cinuse(lastq));        /* Not 2 consecutive free */
  2858                     do_check_free_chunk(m, q);
  2859                 }
  2860                 lastq = q;
  2861                 q = next_chunk(q);
  2862             }
  2863             s = s->next;
  2864         }
  2865     }
  2866     return sum;
  2867 }
  2868 
  2869 /* Check all properties of malloc_state. */
  2870 static void
  2871 do_check_malloc_state(mstate m)
  2872 {
  2873     bindex_t i;
  2874     size_t total;
  2875     /* check bins */
  2876     for (i = 0; i < NSMALLBINS; ++i)
  2877         do_check_smallbin(m, i);
  2878     for (i = 0; i < NTREEBINS; ++i)
  2879         do_check_treebin(m, i);
  2880 
  2881     if (m->dvsize != 0) {       /* check dv chunk */
  2882         do_check_any_chunk(m, m->dv);
  2883         assert(m->dvsize == chunksize(m->dv));
  2884         assert(m->dvsize >= MIN_CHUNK_SIZE);
  2885         assert(bin_find(m, m->dv) == 0);
  2886     }
  2887 
  2888     if (m->top != 0) {          /* check top chunk */
  2889         do_check_top_chunk(m, m->top);
  2890         assert(m->topsize == chunksize(m->top));
  2891         assert(m->topsize > 0);
  2892         assert(bin_find(m, m->top) == 0);
  2893     }
  2894 
  2895     total = traverse_and_check(m);
  2896     assert(total <= m->footprint);
  2897     assert(m->footprint <= m->max_footprint);
  2898 }
  2899 #endif /* DEBUG */
  2900 
  2901 /* ----------------------------- statistics ------------------------------ */
  2902 
  2903 #if !NO_MALLINFO
  2904 static struct mallinfo
  2905 internal_mallinfo(mstate m)
  2906 {
  2907     struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  2908     if (!PREACTION(m)) {
  2909         check_malloc_state(m);
  2910         if (is_initialized(m)) {
  2911             size_t nfree = SIZE_T_ONE;  /* top always free */
  2912             size_t mfree = m->topsize + TOP_FOOT_SIZE;
  2913             size_t sum = mfree;
  2914             msegmentptr s = &m->seg;
  2915             while (s != 0) {
  2916                 mchunkptr q = align_as_chunk(s->base);
  2917                 while (segment_holds(s, q) &&
  2918                        q != m->top && q->head != FENCEPOST_HEAD) {
  2919                     size_t sz = chunksize(q);
  2920                     sum += sz;
  2921                     if (!cinuse(q)) {
  2922                         mfree += sz;
  2923                         ++nfree;
  2924                     }
  2925                     q = next_chunk(q);
  2926                 }
  2927                 s = s->next;
  2928             }
  2929 
  2930             nm.arena = sum;
  2931             nm.ordblks = nfree;
  2932             nm.hblkhd = m->footprint - sum;
  2933             nm.usmblks = m->max_footprint;
  2934             nm.uordblks = m->footprint - mfree;
  2935             nm.fordblks = mfree;
  2936             nm.keepcost = m->topsize;
  2937         }
  2938 
  2939         POSTACTION(m);
  2940     }
  2941     return nm;
  2942 }
  2943 #endif /* !NO_MALLINFO */
  2944 
  2945 static void
  2946 internal_malloc_stats(mstate m)
  2947 {
  2948     if (!PREACTION(m)) {
  2949         size_t maxfp = 0;
  2950         size_t fp = 0;
  2951         size_t used = 0;
  2952         check_malloc_state(m);
  2953         if (is_initialized(m)) {
  2954             msegmentptr s = &m->seg;
  2955             maxfp = m->max_footprint;
  2956             fp = m->footprint;
  2957             used = fp - (m->topsize + TOP_FOOT_SIZE);
  2958 
  2959             while (s != 0) {
  2960                 mchunkptr q = align_as_chunk(s->base);
  2961                 while (segment_holds(s, q) &&
  2962                        q != m->top && q->head != FENCEPOST_HEAD) {
  2963                     if (!cinuse(q))
  2964                         used -= chunksize(q);
  2965                     q = next_chunk(q);
  2966                 }
  2967                 s = s->next;
  2968             }
  2969         }
  2970 #ifndef LACKS_STDIO_H
  2971         fprintf(stderr, "max system bytes = %10lu\n",
  2972                 (unsigned long) (maxfp));
  2973         fprintf(stderr, "system bytes     = %10lu\n", (unsigned long) (fp));
  2974         fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long) (used));
  2975 #endif
  2976 
  2977         POSTACTION(m);
  2978     }
  2979 }
  2980 
  2981 /* ----------------------- Operations on smallbins ----------------------- */
  2982 
  2983 /*
   2984   Various forms of linking and unlinking are defined as macros, even
   2985   the ones for trees, which are very long but have very short typical
   2986   paths.  This is ugly but reduces reliance on the inlining support of
   2987   compilers.
  2988 */
  2989 
  2990 /* Link a free chunk into a smallbin  */
  2991 #define insert_small_chunk(M, P, S) {\
  2992   bindex_t I  = small_index(S);\
  2993   mchunkptr B = smallbin_at(M, I);\
  2994   mchunkptr F = B;\
  2995   assert(S >= MIN_CHUNK_SIZE);\
  2996   if (!smallmap_is_marked(M, I))\
  2997     mark_smallmap(M, I);\
  2998   else if (RTCHECK(ok_address(M, B->fd)))\
  2999     F = B->fd;\
  3000   else {\
  3001     CORRUPTION_ERROR_ACTION(M);\
  3002   }\
  3003   B->fd = P;\
  3004   F->bk = P;\
  3005   P->fd = F;\
  3006   P->bk = B;\
  3007 }
  3008 
  3009 /* Unlink a chunk from a smallbin  */
  3010 #define unlink_small_chunk(M, P, S) {\
  3011   mchunkptr F = P->fd;\
  3012   mchunkptr B = P->bk;\
  3013   bindex_t I = small_index(S);\
  3014   assert(P != B);\
  3015   assert(P != F);\
  3016   assert(chunksize(P) == small_index2size(I));\
  3017   if (F == B)\
  3018     clear_smallmap(M, I);\
  3019   else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
  3020                    (B == smallbin_at(M,I) || ok_address(M, B)))) {\
  3021     F->bk = B;\
  3022     B->fd = F;\
  3023   }\
  3024   else {\
  3025     CORRUPTION_ERROR_ACTION(M);\
  3026   }\
  3027 }
  3028 
  3029 /* Unlink the first chunk from a smallbin */
  3030 #define unlink_first_small_chunk(M, B, P, I) {\
  3031   mchunkptr F = P->fd;\
  3032   assert(P != B);\
  3033   assert(P != F);\
  3034   assert(chunksize(P) == small_index2size(I));\
  3035   if (B == F)\
  3036     clear_smallmap(M, I);\
  3037   else if (RTCHECK(ok_address(M, F))) {\
  3038     B->fd = F;\
  3039     F->bk = B;\
  3040   }\
  3041   else {\
  3042     CORRUPTION_ERROR_ACTION(M);\
  3043   }\
  3044 }
  3045 
  3046 /* Replace dv node, binning the old one */
  3047 /* Used only when dvsize known to be small */
  3048 #define replace_dv(M, P, S) {\
  3049   size_t DVS = M->dvsize;\
  3050   if (DVS != 0) {\
  3051     mchunkptr DV = M->dv;\
  3052     assert(is_small(DVS));\
  3053     insert_small_chunk(M, DV, DVS);\
  3054   }\
  3055   M->dvsize = S;\
  3056   M->dv = P;\
  3057 }
  3058 
  3059 /* ------------------------- Operations on trees ------------------------- */
  3060 
  3061 /* Insert chunk into tree */
  3062 #define insert_large_chunk(M, X, S) {\
  3063   tbinptr* H;\
  3064   bindex_t I;\
  3065   compute_tree_index(S, I);\
  3066   H = treebin_at(M, I);\
  3067   X->index = I;\
  3068   X->child[0] = X->child[1] = 0;\
  3069   if (!treemap_is_marked(M, I)) {\
  3070     mark_treemap(M, I);\
  3071     *H = X;\
  3072     X->parent = (tchunkptr)H;\
  3073     X->fd = X->bk = X;\
  3074   }\
  3075   else {\
  3076     tchunkptr T = *H;\
  3077     size_t K = S << leftshift_for_tree_index(I);\
  3078     for (;;) {\
  3079       if (chunksize(T) != S) {\
  3080         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
  3081         K <<= 1;\
  3082         if (*C != 0)\
  3083           T = *C;\
  3084         else if (RTCHECK(ok_address(M, C))) {\
  3085           *C = X;\
  3086           X->parent = T;\
  3087           X->fd = X->bk = X;\
  3088           break;\
  3089         }\
  3090         else {\
  3091           CORRUPTION_ERROR_ACTION(M);\
  3092           break;\
  3093         }\
  3094       }\
  3095       else {\
  3096         tchunkptr F = T->fd;\
  3097         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
  3098           T->fd = F->bk = X;\
  3099           X->fd = F;\
  3100           X->bk = T;\
  3101           X->parent = 0;\
  3102           break;\
  3103         }\
  3104         else {\
  3105           CORRUPTION_ERROR_ACTION(M);\
  3106           break;\
  3107         }\
  3108       }\
  3109     }\
  3110   }\
  3111 }
  3112 
  3113 /*
  3114   Unlink steps:
  3115 
  3116   1. If x is a chained node, unlink it from its same-sized fd/bk links
  3117      and choose its bk node as its replacement.
  3118   2. If x was the last node of its size, but not a leaf node, it must
  3119      be replaced with a leaf node (not merely one with an open left or
   3120      right), to make sure that lefts and rights of descendants
   3121      correspond properly to bit masks.  We use the rightmost descendant
  3122      of x.  We could use any other leaf, but this is easy to locate and
  3123      tends to counteract removal of leftmosts elsewhere, and so keeps
  3124      paths shorter than minimally guaranteed.  This doesn't loop much
  3125      because on average a node in a tree is near the bottom.
  3126   3. If x is the base of a chain (i.e., has parent links) relink
  3127      x's parent and children to x's replacement (or null if none).
  3128 */
  3129 
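        /*
          Step 2 written as a plain loop (a readable sketch of what the macro
          below does): starting from x's right child if present, otherwise its
          left child, descend to the rightmost existing child; the leaf R
          reached this way is detached by zeroing its parent's slot and then
          stands in for x:

              tchunkptr R, *RP;
              if ((R = *(RP = &x->child[1])) != 0 ||
                  (R = *(RP = &x->child[0])) != 0) {
                  tchunkptr *CP;
                  while (*(CP = &R->child[1]) != 0 ||
                         *(CP = &R->child[0]) != 0)
                      R = *(RP = CP);
                  *RP = 0;
              }
        */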
  3130 #define unlink_large_chunk(M, X) {\
  3131   tchunkptr XP = X->parent;\
  3132   tchunkptr R;\
  3133   if (X->bk != X) {\
  3134     tchunkptr F = X->fd;\
  3135     R = X->bk;\
  3136     if (RTCHECK(ok_address(M, F))) {\
  3137       F->bk = R;\
  3138       R->fd = F;\
  3139     }\
  3140     else {\
  3141       CORRUPTION_ERROR_ACTION(M);\
  3142     }\
  3143   }\
  3144   else {\
  3145     tchunkptr* RP;\
  3146     if (((R = *(RP = &(X->child[1]))) != 0) ||\
  3147         ((R = *(RP = &(X->child[0]))) != 0)) {\
  3148       tchunkptr* CP;\
  3149       while ((*(CP = &(R->child[1])) != 0) ||\
  3150              (*(CP = &(R->child[0])) != 0)) {\
  3151         R = *(RP = CP);\
  3152       }\
  3153       if (RTCHECK(ok_address(M, RP)))\
  3154         *RP = 0;\
  3155       else {\
  3156         CORRUPTION_ERROR_ACTION(M);\
  3157       }\
  3158     }\
  3159   }\
  3160   if (XP != 0) {\
  3161     tbinptr* H = treebin_at(M, X->index);\
  3162     if (X == *H) {\
  3163       if ((*H = R) == 0) \
  3164         clear_treemap(M, X->index);\
  3165     }\
  3166     else if (RTCHECK(ok_address(M, XP))) {\
  3167       if (XP->child[0] == X) \
  3168         XP->child[0] = R;\
  3169       else \
  3170         XP->child[1] = R;\
  3171     }\
  3172     else\
  3173       CORRUPTION_ERROR_ACTION(M);\
  3174     if (R != 0) {\
  3175       if (RTCHECK(ok_address(M, R))) {\
  3176         tchunkptr C0, C1;\
  3177         R->parent = XP;\
  3178         if ((C0 = X->child[0]) != 0) {\
  3179           if (RTCHECK(ok_address(M, C0))) {\
  3180             R->child[0] = C0;\
  3181             C0->parent = R;\
  3182           }\
  3183           else\
  3184             CORRUPTION_ERROR_ACTION(M);\
  3185         }\
  3186         if ((C1 = X->child[1]) != 0) {\
  3187           if (RTCHECK(ok_address(M, C1))) {\
  3188             R->child[1] = C1;\
  3189             C1->parent = R;\
  3190           }\
  3191           else\
  3192             CORRUPTION_ERROR_ACTION(M);\
  3193         }\
  3194       }\
  3195       else\
  3196         CORRUPTION_ERROR_ACTION(M);\
  3197     }\
  3198   }\
  3199 }
  3200 
  3201 /* Relays to large vs small bin operations */
  3202 
  3203 #define insert_chunk(M, P, S)\
  3204   if (is_small(S)) insert_small_chunk(M, P, S)\
  3205   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
  3206 
  3207 #define unlink_chunk(M, P, S)\
  3208   if (is_small(S)) unlink_small_chunk(M, P, S)\
  3209   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
  3210 
  3211 
  3212 /* Relays to internal calls to malloc/free from realloc, memalign etc */
  3213 
  3214 #if ONLY_MSPACES
  3215 #define internal_malloc(m, b) mspace_malloc(m, b)
  3216 #define internal_free(m, mem) mspace_free(m,mem);
  3217 #else /* ONLY_MSPACES */
  3218 #if MSPACES
  3219 #define internal_malloc(m, b)\
  3220    (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
  3221 #define internal_free(m, mem)\
  3222    if (m == gm) dlfree(mem); else mspace_free(m,mem);
  3223 #else /* MSPACES */
  3224 #define internal_malloc(m, b) dlmalloc(b)
  3225 #define internal_free(m, mem) dlfree(mem)
  3226 #endif /* MSPACES */
  3227 #endif /* ONLY_MSPACES */
  3228 
  3229 /* -----------------------  Direct-mmapping chunks ----------------------- */
  3230 
  3231 /*
  3232   Directly mmapped chunks are set up with an offset to the start of
  3233   the mmapped region stored in the prev_foot field of the chunk. This
  3234   allows reconstruction of the required argument to MUNMAP when freed,
  3235   and also allows adjustment of the returned chunk to meet alignment
  3236   requirements (especially in memalign).  There is also enough space
  3237   allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
  3238   the PINUSE bit so frees can be checked.
  3239 */
  3240 
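        /*
          Layout sketch for a directly mmapped chunk (illustrative; it matches
          mmap_alloc below).  With mm the start of the mapping and
          offset == align_offset(chunk2mem(mm)):

              mm                         start of mapping
              mm + offset                chunk p:
                                           prev_foot = offset | IS_MMAPPED_BIT
                                           head      = psize  | CINUSE_BIT
              p + psize                  fake chunk, head = FENCEPOST_HEAD
              p + psize + SIZE_T_SIZE    fake chunk, head = 0

          so the original mapping address can be recovered as
          (char*)p - (p->prev_foot & ~IS_MMAPPED_BIT), and its total length is
          psize + offset + MMAP_FOOT_PAD.
        */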
  3241 /* Malloc using mmap */
  3242 static void *
  3243 mmap_alloc(mstate m, size_t nb)
  3244 {
  3245     size_t mmsize =
  3246         granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3247     if (mmsize > nb) {          /* Check for wrap around 0 */
  3248         char *mm = (char *) (DIRECT_MMAP(mmsize));
  3249         if (mm != CMFAIL) {
  3250             size_t offset = align_offset(chunk2mem(mm));
  3251             size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  3252             mchunkptr p = (mchunkptr) (mm + offset);
  3253             p->prev_foot = offset | IS_MMAPPED_BIT;
  3254             (p)->head = (psize | CINUSE_BIT);
  3255             mark_inuse_foot(m, p, psize);
  3256             chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  3257             chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
  3258 
  3259             if (mm < m->least_addr)
  3260                 m->least_addr = mm;
  3261             if ((m->footprint += mmsize) > m->max_footprint)
  3262                 m->max_footprint = m->footprint;
  3263             assert(is_aligned(chunk2mem(p)));
  3264             check_mmapped_chunk(m, p);
  3265             return chunk2mem(p);
  3266         }
  3267     }
  3268     return 0;
  3269 }
  3270 
  3271 /* Realloc using mmap */
  3272 static mchunkptr
  3273 mmap_resize(mstate m, mchunkptr oldp, size_t nb)
  3274 {
  3275     size_t oldsize = chunksize(oldp);
  3276     if (is_small(nb))           /* Can't shrink mmap regions below small size */
  3277         return 0;
  3278     /* Keep old chunk if big enough but not too big */
  3279     if (oldsize >= nb + SIZE_T_SIZE &&
  3280         (oldsize - nb) <= (mparams.granularity << 1))
  3281         return oldp;
  3282     else {
  3283         size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
  3284         size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
  3285         size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
  3286                                              CHUNK_ALIGN_MASK);
  3287         char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
  3288                                         oldmmsize, newmmsize, 1);
  3289         if (cp != CMFAIL) {
  3290             mchunkptr newp = (mchunkptr) (cp + offset);
  3291             size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
  3292             newp->head = (psize | CINUSE_BIT);
  3293             mark_inuse_foot(m, newp, psize);
  3294             chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
  3295             chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
  3296 
  3297             if (cp < m->least_addr)
  3298                 m->least_addr = cp;
  3299             if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
  3300                 m->max_footprint = m->footprint;
  3301             check_mmapped_chunk(m, newp);
  3302             return newp;
  3303         }
  3304     }
  3305     return 0;
  3306 }
  3307 
  3308 /* -------------------------- mspace management -------------------------- */
  3309 
  3310 /* Initialize top chunk and its size */
  3311 static void
  3312 init_top(mstate m, mchunkptr p, size_t psize)
  3313 {
  3314     /* Ensure alignment */
  3315     size_t offset = align_offset(chunk2mem(p));
  3316     p = (mchunkptr) ((char *) p + offset);
  3317     psize -= offset;
  3318 
  3319     m->top = p;
  3320     m->topsize = psize;
  3321     p->head = psize | PINUSE_BIT;
  3322     /* set size of fake trailing chunk holding overhead space only once */
  3323     chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  3324     m->trim_check = mparams.trim_threshold;     /* reset on each update */
  3325 }
  3326 
  3327 /* Initialize bins for a new mstate that is otherwise zeroed out */
  3328 static void
  3329 init_bins(mstate m)
  3330 {
  3331     /* Establish circular links for smallbins */
  3332     bindex_t i;
  3333     for (i = 0; i < NSMALLBINS; ++i) {
  3334         sbinptr bin = smallbin_at(m, i);
  3335         bin->fd = bin->bk = bin;
  3336     }
  3337 }
  3338 
  3339 #if PROCEED_ON_ERROR
  3340 
  3341 /* default corruption action */
  3342 static void
  3343 reset_on_error(mstate m)
  3344 {
  3345     int i;
  3346     ++malloc_corruption_error_count;
  3347     /* Reinitialize fields to forget about all memory */
  3348     m->smallbins = m->treebins = 0;
  3349     m->dvsize = m->topsize = 0;
  3350     m->seg.base = 0;
  3351     m->seg.size = 0;
  3352     m->seg.next = 0;
  3353     m->top = m->dv = 0;
  3354     for (i = 0; i < NTREEBINS; ++i)
  3355         *treebin_at(m, i) = 0;
  3356     init_bins(m);
  3357 }
  3358 #endif /* PROCEED_ON_ERROR */
  3359 
   3360 /* Allocate chunk at newbase; prepend the remainder to the first chunk of the successor (old) base. */
  3361 static void *
  3362 prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
  3363 {
  3364     mchunkptr p = align_as_chunk(newbase);
  3365     mchunkptr oldfirst = align_as_chunk(oldbase);
  3366     size_t psize = (char *) oldfirst - (char *) p;
  3367     mchunkptr q = chunk_plus_offset(p, nb);
  3368     size_t qsize = psize - nb;
  3369     set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3370 
  3371     assert((char *) oldfirst > (char *) q);
  3372     assert(pinuse(oldfirst));
  3373     assert(qsize >= MIN_CHUNK_SIZE);
  3374 
  3375     /* consolidate remainder with first chunk of old base */
  3376     if (oldfirst == m->top) {
  3377         size_t tsize = m->topsize += qsize;
  3378         m->top = q;
  3379         q->head = tsize | PINUSE_BIT;
  3380         check_top_chunk(m, q);
  3381     } else if (oldfirst == m->dv) {
  3382         size_t dsize = m->dvsize += qsize;
  3383         m->dv = q;
  3384         set_size_and_pinuse_of_free_chunk(q, dsize);
  3385     } else {
  3386         if (!cinuse(oldfirst)) {
  3387             size_t nsize = chunksize(oldfirst);
  3388             unlink_chunk(m, oldfirst, nsize);
  3389             oldfirst = chunk_plus_offset(oldfirst, nsize);
  3390             qsize += nsize;
  3391         }
  3392         set_free_with_pinuse(q, qsize, oldfirst);
  3393         insert_chunk(m, q, qsize);
  3394         check_free_chunk(m, q);
  3395     }
  3396 
  3397     check_malloced_chunk(m, chunk2mem(p), nb);
  3398     return chunk2mem(p);
  3399 }
  3400 
  3401 
  3402 /* Add a segment to hold a new noncontiguous region */
  3403 static void
  3404 add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
  3405 {
  3406     /* Determine locations and sizes of segment, fenceposts, old top */
  3407     char *old_top = (char *) m->top;
  3408     msegmentptr oldsp = segment_holding(m, old_top);
  3409     char *old_end = oldsp->base + oldsp->size;
  3410     size_t ssize = pad_request(sizeof(struct malloc_segment));
  3411     char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3412     size_t offset = align_offset(chunk2mem(rawsp));
  3413     char *asp = rawsp + offset;
  3414     char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
  3415     mchunkptr sp = (mchunkptr) csp;
  3416     msegmentptr ss = (msegmentptr) (chunk2mem(sp));
  3417     mchunkptr tnext = chunk_plus_offset(sp, ssize);
  3418     mchunkptr p = tnext;
  3419     int nfences = 0;
  3420 
  3421     /* reset top to new space */
  3422     init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
  3423 
  3424     /* Set up segment record */
  3425     assert(is_aligned(ss));
  3426     set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  3427     *ss = m->seg;               /* Push current record */
  3428     m->seg.base = tbase;
  3429     m->seg.size = tsize;
  3430     m->seg.sflags = mmapped;
  3431     m->seg.next = ss;
  3432 
  3433     /* Insert trailing fenceposts */
  3434     for (;;) {
  3435         mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  3436         p->head = FENCEPOST_HEAD;
  3437         ++nfences;
  3438         if ((char *) (&(nextp->head)) < old_end)
  3439             p = nextp;
  3440         else
  3441             break;
  3442     }
  3443     assert(nfences >= 2);
  3444 
  3445     /* Insert the rest of old top into a bin as an ordinary free chunk */
  3446     if (csp != old_top) {
  3447         mchunkptr q = (mchunkptr) old_top;
  3448         size_t psize = csp - old_top;
  3449         mchunkptr tn = chunk_plus_offset(q, psize);
  3450         set_free_with_pinuse(q, psize, tn);
  3451         insert_chunk(m, q, psize);
  3452     }
  3453 
  3454     check_top_chunk(m, m->top);
  3455 }
  3456 
  3457 /* -------------------------- System allocation -------------------------- */
  3458 
  3459 /* Get memory from system using MORECORE or MMAP */
  3460 static void *
  3461 sys_alloc(mstate m, size_t nb)
  3462 {
  3463     char *tbase = CMFAIL;
  3464     size_t tsize = 0;
  3465     flag_t mmap_flag = 0;
  3466 
  3467     init_mparams();
  3468 
  3469     /* Directly map large chunks */
  3470     if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  3471         void *mem = mmap_alloc(m, nb);
  3472         if (mem != 0)
  3473             return mem;
  3474     }
  3475 
  3476     /*
  3477        Try getting memory in any of three ways (in most-preferred to
  3478        least-preferred order):
  3479        1. A call to MORECORE that can normally contiguously extend memory.
  3480        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
   3481        main space is mmapped or a previous contiguous call failed)
  3482        2. A call to MMAP new space (disabled if not HAVE_MMAP).
  3483        Note that under the default settings, if MORECORE is unable to
  3484        fulfill a request, and HAVE_MMAP is true, then mmap is
  3485        used as a noncontiguous system allocator. This is a useful backup
  3486        strategy for systems with holes in address spaces -- in this case
  3487        sbrk cannot contiguously expand the heap, but mmap may be able to
  3488        find space.
  3489        3. A call to MORECORE that cannot usually contiguously extend memory.
  3490        (disabled if not HAVE_MORECORE)
  3491      */
  3492 
  3493     if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  3494         char *br = CMFAIL;
  3495         msegmentptr ss =
  3496             (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
  3497         size_t asize = 0;
  3498         ACQUIRE_MORECORE_LOCK();
  3499 
  3500         if (ss == 0) {          /* First time through or recovery */
  3501             char *base = (char *) CALL_MORECORE(0);
  3502             if (base != CMFAIL) {
  3503                 asize =
  3504                     granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
  3505                                       SIZE_T_ONE);
  3506                 /* Adjust to end on a page boundary */
  3507                 if (!is_page_aligned(base))
  3508                     asize += (page_align((size_t) base) - (size_t) base);
  3509                 /* Can't call MORECORE if size is negative when treated as signed */
  3510                 if (asize < HALF_MAX_SIZE_T &&
  3511                     (br = (char *) (CALL_MORECORE(asize))) == base) {
  3512                     tbase = base;
  3513                     tsize = asize;
  3514                 }
  3515             }
  3516         } else {
  3517             /* Subtract out existing available top space from MORECORE request. */
  3518             asize =
  3519                 granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
  3520                                   MALLOC_ALIGNMENT + SIZE_T_ONE);
   3521             /* Use mem here only if it did contiguously extend old space */
  3522             if (asize < HALF_MAX_SIZE_T &&
  3523                 (br =
  3524                  (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
  3525                 tbase = br;
  3526                 tsize = asize;
  3527             }
  3528         }
  3529 
  3530         if (tbase == CMFAIL) {  /* Cope with partial failure */
  3531             if (br != CMFAIL) { /* Try to use/extend the space we did get */
  3532                 if (asize < HALF_MAX_SIZE_T &&
  3533                     asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  3534                     size_t esize =
  3535                         granularity_align(nb + TOP_FOOT_SIZE +
  3536                                           MALLOC_ALIGNMENT + SIZE_T_ONE -
  3537                                           asize);
  3538                     if (esize < HALF_MAX_SIZE_T) {
  3539                         char *end = (char *) CALL_MORECORE(esize);
  3540                         if (end != CMFAIL)
  3541                             asize += esize;
  3542                         else {  /* Can't use; try to release */
  3543                             end = (char *) CALL_MORECORE(-asize);
  3544                             br = CMFAIL;
  3545                         }
  3546                     }
  3547                 }
  3548             }
  3549             if (br != CMFAIL) { /* Use the space we did get */
  3550                 tbase = br;
  3551                 tsize = asize;
  3552             } else
  3553                 disable_contiguous(m);  /* Don't try contiguous path in the future */
  3554         }
  3555 
  3556         RELEASE_MORECORE_LOCK();
  3557     }
  3558 
  3559     if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
  3560         size_t req = nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE;
  3561         size_t rsize = granularity_align(req);
  3562         if (rsize > nb) {       /* Fail if wraps around zero */
  3563             char *mp = (char *) (CALL_MMAP(rsize));
  3564             if (mp != CMFAIL) {
  3565                 tbase = mp;
  3566                 tsize = rsize;
  3567                 mmap_flag = IS_MMAPPED_BIT;
  3568             }
  3569         }
  3570     }
  3571 
  3572     if (HAVE_MORECORE && tbase == CMFAIL) {     /* Try noncontiguous MORECORE */
  3573         size_t asize =
  3574             granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
  3575                               SIZE_T_ONE);
  3576         if (asize < HALF_MAX_SIZE_T) {
  3577             char *br = CMFAIL;
  3578             char *end = CMFAIL;
  3579             ACQUIRE_MORECORE_LOCK();
  3580             br = (char *) (CALL_MORECORE(asize));
  3581             end = (char *) (CALL_MORECORE(0));
  3582             RELEASE_MORECORE_LOCK();
  3583             if (br != CMFAIL && end != CMFAIL && br < end) {
  3584                 size_t ssize = end - br;
  3585                 if (ssize > nb + TOP_FOOT_SIZE) {
  3586                     tbase = br;
  3587                     tsize = ssize;
  3588                 }
  3589             }
  3590         }
  3591     }
  3592 
  3593     if (tbase != CMFAIL) {
  3594 
  3595         if ((m->footprint += tsize) > m->max_footprint)
  3596             m->max_footprint = m->footprint;
  3597 
  3598         if (!is_initialized(m)) {       /* first-time initialization */
  3599             m->seg.base = m->least_addr = tbase;
  3600             m->seg.size = tsize;
  3601             m->seg.sflags = mmap_flag;
  3602             m->magic = mparams.magic;
  3603             init_bins(m);
  3604             if (is_global(m))
  3605                 init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
  3606             else {
  3607                 /* Offset top by embedded malloc_state */
  3608                 mchunkptr mn = next_chunk(mem2chunk(m));
  3609                 init_top(m, mn,
  3610                          (size_t) ((tbase + tsize) - (char *) mn) -
  3611                          TOP_FOOT_SIZE);
  3612             }
  3613         }
  3614 
  3615         else {
  3616             /* Try to merge with an existing segment */
  3617             msegmentptr sp = &m->seg;
  3618             while (sp != 0 && tbase != sp->base + sp->size)
  3619                 sp = sp->next;
  3620             if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
  3621                 sp->size += tsize;
  3622                 init_top(m, m->top, m->topsize + tsize);
  3623             } else {
  3624                 if (tbase < m->least_addr)
  3625                     m->least_addr = tbase;
  3626                 sp = &m->seg;
  3627                 while (sp != 0 && sp->base != tbase + tsize)
  3628                     sp = sp->next;
  3629                 if (sp != 0 &&
  3630                     !is_extern_segment(sp) &&
  3631                     (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
  3632                     char *oldbase = sp->base;
  3633                     sp->base = tbase;
  3634                     sp->size += tsize;
  3635                     return prepend_alloc(m, tbase, oldbase, nb);
  3636                 } else
  3637                     add_segment(m, tbase, tsize, mmap_flag);
  3638             }
  3639         }
  3640 
  3641         if (nb < m->topsize) {  /* Allocate from new or extended top space */
  3642             size_t rsize = m->topsize -= nb;
  3643             mchunkptr p = m->top;
  3644             mchunkptr r = m->top = chunk_plus_offset(p, nb);
  3645             r->head = rsize | PINUSE_BIT;
  3646             set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3647             check_top_chunk(m, m->top);
  3648             check_malloced_chunk(m, chunk2mem(p), nb);
  3649             return chunk2mem(p);
  3650         }
  3651     }
  3652 
  3653     MALLOC_FAILURE_ACTION;
  3654     return 0;
  3655 }
  3656 
  3657 /* -----------------------  system deallocation -------------------------- */
  3658 
  3659 /* Unmap and unlink any mmapped segments that don't contain used chunks */
  3660 static size_t
  3661 release_unused_segments(mstate m)
  3662 {
  3663     size_t released = 0;
  3664     msegmentptr pred = &m->seg;
  3665     msegmentptr sp = pred->next;
  3666     while (sp != 0) {
  3667         char *base = sp->base;
  3668         size_t size = sp->size;
  3669         msegmentptr next = sp->next;
  3670         if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
  3671             mchunkptr p = align_as_chunk(base);
  3672             size_t psize = chunksize(p);
  3673             /* Can unmap if first chunk holds entire segment and not pinned */
  3674             if (!cinuse(p)
  3675                 && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
  3676                 tchunkptr tp = (tchunkptr) p;
  3677                 assert(segment_holds(sp, (char *) sp));
  3678                 if (p == m->dv) {
  3679                     m->dv = 0;
  3680                     m->dvsize = 0;
  3681                 } else {
  3682                     unlink_large_chunk(m, tp);
  3683                 }
  3684                 if (CALL_MUNMAP(base, size) == 0) {
  3685                     released += size;
  3686                     m->footprint -= size;
  3687                     /* unlink obsoleted record */
  3688                     sp = pred;
  3689                     sp->next = next;
  3690                 } else {        /* back out if cannot unmap */
  3691                     insert_large_chunk(m, tp, psize);
  3692                 }
  3693             }
  3694         }
  3695         pred = sp;
  3696         sp = next;
  3697     }
  3698     return released;
  3699 }
  3700 
  3701 static int
  3702 sys_trim(mstate m, size_t pad)
  3703 {
  3704     size_t released = 0;
  3705     if (pad < MAX_REQUEST && is_initialized(m)) {
  3706         pad += TOP_FOOT_SIZE;   /* ensure enough room for segment overhead */
  3707 
  3708         if (m->topsize > pad) {
  3709             /* Shrink top space in granularity-size units, keeping at least one */
  3710             size_t unit = mparams.granularity;
  3711             size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
  3712                             SIZE_T_ONE) * unit;
  3713             msegmentptr sp = segment_holding(m, (char *) m->top);
  3714 
  3715             if (!is_extern_segment(sp)) {
  3716                 if (is_mmapped_segment(sp)) {
  3717                     if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) {   /* can't shrink if pinned */
  3718                         size_t newsize = sp->size - extra;
  3719                         /* Prefer mremap, fall back to munmap */
  3720                         if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) !=
  3721                              MFAIL)
  3722                             || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
  3723                             released = extra;
  3724                         }
  3725                     }
  3726                 } else if (HAVE_MORECORE) {
  3727                     if (extra >= HALF_MAX_SIZE_T)       /* Avoid wrapping negative */
  3728                         extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
  3729                     ACQUIRE_MORECORE_LOCK();
  3730                     {
  3731                         /* Make sure end of memory is where we last set it. */
  3732                         char *old_br = (char *) (CALL_MORECORE(0));
  3733                         if (old_br == sp->base + sp->size) {
  3734                             char *rel_br = (char *) (CALL_MORECORE(-extra));
  3735                             char *new_br = (char *) (CALL_MORECORE(0));
  3736                             if (rel_br != CMFAIL && new_br < old_br)
  3737                                 released = old_br - new_br;
  3738                         }
  3739                     }
  3740                     RELEASE_MORECORE_LOCK();
  3741                 }
  3742             }
  3743 
  3744             if (released != 0) {
  3745                 sp->size -= released;
  3746                 m->footprint -= released;
  3747                 init_top(m, m->top, m->topsize - released);
  3748                 check_top_chunk(m, m->top);
  3749             }
  3750         }
  3751 
  3752         /* Unmap any unused mmapped segments */
  3753         if (HAVE_MMAP)
  3754             released += release_unused_segments(m);
  3755 
  3756         /* On failure, disable autotrim to avoid repeated failed future calls */
  3757         if (released == 0)
  3758             m->trim_check = MAX_SIZE_T;
  3759     }
  3760 
  3761     return (released != 0) ? 1 : 0;
  3762 }
  3763 
  3764 /* ---------------------------- malloc support --------------------------- */
  3765 
  3766 /* allocate a large request from the best fitting chunk in a treebin */
  3767 static void *
  3768 tmalloc_large(mstate m, size_t nb)
  3769 {
  3770     tchunkptr v = 0;
  3771     size_t rsize = -nb;         /* Unsigned negation */
  3772     tchunkptr t;
  3773     bindex_t idx;
  3774     compute_tree_index(nb, idx);
  3775 
  3776     if ((t = *treebin_at(m, idx)) != 0) {
  3777         /* Traverse tree for this bin looking for node with size == nb */
  3778         size_t sizebits = nb << leftshift_for_tree_index(idx);
  3779         tchunkptr rst = 0;      /* The deepest untaken right subtree */
  3780         for (;;) {
  3781             tchunkptr rt;
  3782             size_t trem = chunksize(t) - nb;
  3783             if (trem < rsize) {
  3784                 v = t;
  3785                 if ((rsize = trem) == 0)
  3786                     break;
  3787             }
  3788             rt = t->child[1];
  3789             t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
  3790             if (rt != 0 && rt != t)
  3791                 rst = rt;
  3792             if (t == 0) {
  3793                 t = rst;        /* set t to least subtree holding sizes > nb */
  3794                 break;
  3795             }
  3796             sizebits <<= 1;
  3797         }
  3798     }
  3799 
  3800     if (t == 0 && v == 0) {     /* set t to root of next non-empty treebin */
  3801         binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
  3802         if (leftbits != 0) {
  3803             bindex_t i;
  3804             binmap_t leastbit = least_bit(leftbits);
  3805             compute_bit2idx(leastbit, i);
  3806             t = *treebin_at(m, i);
  3807         }
  3808     }
  3809 
  3810     while (t != 0) {            /* find smallest of tree or subtree */
  3811         size_t trem = chunksize(t) - nb;
  3812         if (trem < rsize) {
  3813             rsize = trem;
  3814             v = t;
  3815         }
  3816         t = leftmost_child(t);
  3817     }
  3818 
  3819     /*  If dv is a better fit, return 0 so malloc will use it */
  3820     if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
  3821         if (RTCHECK(ok_address(m, v))) {        /* split */
  3822             mchunkptr r = chunk_plus_offset(v, nb);
  3823             assert(chunksize(v) == rsize + nb);
  3824             if (RTCHECK(ok_next(v, r))) {
  3825                 unlink_large_chunk(m, v);
  3826                 if (rsize < MIN_CHUNK_SIZE)
  3827                     set_inuse_and_pinuse(m, v, (rsize + nb));
  3828                 else {
  3829                     set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3830                     set_size_and_pinuse_of_free_chunk(r, rsize);
  3831                     insert_chunk(m, r, rsize);
  3832                 }
  3833                 return chunk2mem(v);
  3834             }
  3835         }
  3836         CORRUPTION_ERROR_ACTION(m);
  3837     }
  3838     return 0;
  3839 }
  3840 
  3841 /* allocate a small request from the best fitting chunk in a treebin */
  3842 static void *
  3843 tmalloc_small(mstate m, size_t nb)
  3844 {
  3845     tchunkptr t, v;
  3846     size_t rsize;
  3847     bindex_t i;
  3848     binmap_t leastbit = least_bit(m->treemap);
  3849     compute_bit2idx(leastbit, i);
  3850 
  3851     v = t = *treebin_at(m, i);
  3852     rsize = chunksize(t) - nb;
  3853 
  3854     while ((t = leftmost_child(t)) != 0) {
  3855         size_t trem = chunksize(t) - nb;
  3856         if (trem < rsize) {
  3857             rsize = trem;
  3858             v = t;
  3859         }
  3860     }
  3861 
  3862     if (RTCHECK(ok_address(m, v))) {
  3863         mchunkptr r = chunk_plus_offset(v, nb);
  3864         assert(chunksize(v) == rsize + nb);
  3865         if (RTCHECK(ok_next(v, r))) {
  3866             unlink_large_chunk(m, v);
  3867             if (rsize < MIN_CHUNK_SIZE)
  3868                 set_inuse_and_pinuse(m, v, (rsize + nb));
  3869             else {
  3870                 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3871                 set_size_and_pinuse_of_free_chunk(r, rsize);
  3872                 replace_dv(m, r, rsize);
  3873             }
  3874             return chunk2mem(v);
  3875         }
  3876     }
  3877 
  3878     CORRUPTION_ERROR_ACTION(m);
  3879     return 0;
  3880 }
  3881 
  3882 /* --------------------------- realloc support --------------------------- */
  3883 
  3884 static void *
  3885 internal_realloc(mstate m, void *oldmem, size_t bytes)
  3886 {
  3887     if (bytes >= MAX_REQUEST) {
  3888         MALLOC_FAILURE_ACTION;
  3889         return 0;
  3890     }
  3891     if (!PREACTION(m)) {
  3892         mchunkptr oldp = mem2chunk(oldmem);
  3893         size_t oldsize = chunksize(oldp);
  3894         mchunkptr next = chunk_plus_offset(oldp, oldsize);
  3895         mchunkptr newp = 0;
  3896         void *extra = 0;
  3897 
  3898         /* Try to either shrink or extend into top. Else malloc-copy-free */
  3899 
  3900         if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
  3901                     ok_next(oldp, next) && ok_pinuse(next))) {
  3902             size_t nb = request2size(bytes);
  3903             if (is_mmapped(oldp))
  3904                 newp = mmap_resize(m, oldp, nb);
  3905             else if (oldsize >= nb) {   /* already big enough */
  3906                 size_t rsize = oldsize - nb;
  3907                 newp = oldp;
  3908                 if (rsize >= MIN_CHUNK_SIZE) {
  3909                     mchunkptr remainder = chunk_plus_offset(newp, nb);
  3910                     set_inuse(m, newp, nb);
  3911                     set_inuse(m, remainder, rsize);
  3912                     extra = chunk2mem(remainder);
  3913                 }
  3914             } else if (next == m->top && oldsize + m->topsize > nb) {
  3915                 /* Expand into top */
  3916                 size_t newsize = oldsize + m->topsize;
  3917                 size_t newtopsize = newsize - nb;
  3918                 mchunkptr newtop = chunk_plus_offset(oldp, nb);
  3919                 set_inuse(m, oldp, nb);
  3920                 newtop->head = newtopsize | PINUSE_BIT;
  3921                 m->top = newtop;
  3922                 m->topsize = newtopsize;
  3923                 newp = oldp;
  3924             }
  3925         } else {
  3926             USAGE_ERROR_ACTION(m, oldmem);
  3927             POSTACTION(m);
  3928             return 0;
  3929         }
  3930 
  3931         POSTACTION(m);
  3932 
  3933         if (newp != 0) {
  3934             if (extra != 0) {
  3935                 internal_free(m, extra);
  3936             }
  3937             check_inuse_chunk(m, newp);
  3938             return chunk2mem(newp);
  3939         } else {
  3940             void *newmem = internal_malloc(m, bytes);
  3941             if (newmem != 0) {
  3942                 size_t oc = oldsize - overhead_for(oldp);
  3943                 memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
  3944                 internal_free(m, oldmem);
  3945             }
  3946             return newmem;
  3947         }
  3948     }
  3949     return 0;
  3950 }
  3951 
  3952 /* --------------------------- memalign support -------------------------- */
  3953 
  3954 static void *
  3955 internal_memalign(mstate m, size_t alignment, size_t bytes)
  3956 {
  3957     if (alignment <= MALLOC_ALIGNMENT)  /* Can just use malloc */
  3958         return internal_malloc(m, bytes);
  3959     if (alignment < MIN_CHUNK_SIZE)     /* must be at least a minimum chunk size */
  3960         alignment = MIN_CHUNK_SIZE;
  3961     if ((alignment & (alignment - SIZE_T_ONE)) != 0) {  /* Ensure a power of 2 */
  3962         size_t a = MALLOC_ALIGNMENT << 1;
  3963         while (a < alignment)
  3964             a <<= 1;
  3965         alignment = a;
  3966     }
  3967 
  3968     if (bytes >= MAX_REQUEST - alignment) {
  3969         if (m != 0) {           /* Test isn't needed but avoids compiler warning */
  3970             MALLOC_FAILURE_ACTION;
  3971         }
  3972     } else {
  3973         size_t nb = request2size(bytes);
  3974         size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
  3975         char *mem = (char *) internal_malloc(m, req);
  3976         if (mem != 0) {
  3977             void *leader = 0;
  3978             void *trailer = 0;
  3979             mchunkptr p = mem2chunk(mem);
  3980 
  3981             if (PREACTION(m))
  3982                 return 0;
  3983             if ((((size_t) (mem)) % alignment) != 0) {  /* misaligned */
  3984                 /*
  3985                    Find an aligned spot inside chunk.  Since we need to give
  3986                    back leading space in a chunk of at least MIN_CHUNK_SIZE, if
  3987                    the first calculation places us at a spot with less than
  3988                    MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
  3989                    We've allocated enough total room so that this is always
  3990                    possible.
  3991                  */
  3992                 char *br = (char *) mem2chunk((size_t) (((size_t) (mem +
  3993                                                                    alignment -
  3994                                                                    SIZE_T_ONE))
  3995                                                         & -alignment));
  3996                 char *pos =
  3997                     ((size_t) (br - (char *) (p)) >=
  3998                      MIN_CHUNK_SIZE) ? br : br + alignment;
  3999                 mchunkptr newp = (mchunkptr) pos;
  4000                 size_t leadsize = pos - (char *) (p);
  4001                 size_t newsize = chunksize(p) - leadsize;
  4002 
  4003                 if (is_mmapped(p)) {    /* For mmapped chunks, just adjust offset */
  4004                     newp->prev_foot = p->prev_foot + leadsize;
  4005                     newp->head = (newsize | CINUSE_BIT);
  4006                 } else {        /* Otherwise, give back leader, use the rest */
  4007                     set_inuse(m, newp, newsize);
  4008                     set_inuse(m, p, leadsize);
  4009                     leader = chunk2mem(p);
  4010                 }
  4011                 p = newp;
  4012             }
  4013 
  4014             /* Give back spare room at the end */
  4015             if (!is_mmapped(p)) {
  4016                 size_t size = chunksize(p);
  4017                 if (size > nb + MIN_CHUNK_SIZE) {
  4018                     size_t remainder_size = size - nb;
  4019                     mchunkptr remainder = chunk_plus_offset(p, nb);
  4020                     set_inuse(m, p, nb);
  4021                     set_inuse(m, remainder, remainder_size);
  4022                     trailer = chunk2mem(remainder);
  4023                 }
  4024             }
  4025 
  4026             assert(chunksize(p) >= nb);
  4027             assert((((size_t) (chunk2mem(p))) % alignment) == 0);
  4028             check_inuse_chunk(m, p);
  4029             POSTACTION(m);
  4030             if (leader != 0) {
  4031                 internal_free(m, leader);
  4032             }
  4033             if (trailer != 0) {
  4034                 internal_free(m, trailer);
  4035             }
  4036             return chunk2mem(p);
  4037         }
  4038     }
  4039     return 0;
  4040 }
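
/*
  Illustrative sketch of the carving done above (figures hypothetical,
  assuming the usual 8-byte chunk overhead and a MIN_CHUNK_SIZE of 16).
  The request is padded to nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD
  so that an aligned position with room for a MIN_CHUNK_SIZE leader always
  exists inside the returned chunk.  The round-up itself is the standard
  power-of-two trick:

    // round a payload address up to the next multiple of 'alignment'
    // (alignment is a power of two, as enforced above)
    size_t aligned = ((size_t) mem + alignment - 1) & ~(alignment - 1);

  For example, with alignment = 64 and a chunk starting at 0x1000, the
  first usable aligned payload is 0x1040, leaving a 0x38-byte leader chunk
  that is split off and freed.  Had that leader been smaller than
  MIN_CHUNK_SIZE, the next boundary (0x1080) would be used instead, which
  the extra bytes requested above always cover.
*/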
  4041 
  4042 /* ------------------------ comalloc/coalloc support --------------------- */
  4043 
  4044 static void **
  4045 ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
  4046 {
  4047     /*
  4048        This provides common support for independent_X routines, handling
  4049        all of the combinations that can result.
  4050 
  4051        The opts arg has:
  4052        bit 0 set if all elements are same size (using sizes[0])
  4053        bit 1 set if elements should be zeroed
  4054      */
  4055 
  4056     size_t element_size;        /* chunksize of each element, if all same */
  4057     size_t contents_size;       /* total size of elements */
  4058     size_t array_size;          /* request size of pointer array */
  4059     void *mem;                  /* malloced aggregate space */
  4060     mchunkptr p;                /* corresponding chunk */
  4061     size_t remainder_size;      /* remaining bytes while splitting */
  4062     void **marray;              /* either "chunks" or malloced ptr array */
  4063     mchunkptr array_chunk;      /* chunk for malloced ptr array */
  4064     flag_t was_enabled;         /* to disable mmap */
  4065     size_t size;
  4066     size_t i;
  4067 
  4068     /* compute array length, if needed */
  4069     if (chunks != 0) {
  4070         if (n_elements == 0)
  4071             return chunks;      /* nothing to do */
  4072         marray = chunks;
  4073         array_size = 0;
  4074     } else {
  4075         /* if empty req, must still return chunk representing empty array */
  4076         if (n_elements == 0)
  4077             return (void **) internal_malloc(m, 0);
  4078         marray = 0;
  4079         array_size = request2size(n_elements * (sizeof(void *)));
  4080     }
  4081 
  4082     /* compute total element size */
  4083     if (opts & 0x1) {           /* all-same-size */
  4084         element_size = request2size(*sizes);
  4085         contents_size = n_elements * element_size;
  4086     } else {                    /* add up all the sizes */
  4087         element_size = 0;
  4088         contents_size = 0;
  4089         for (i = 0; i != n_elements; ++i)
  4090             contents_size += request2size(sizes[i]);
  4091     }
  4092 
  4093     size = contents_size + array_size;
  4094 
  4095     /*
  4096        Allocate the aggregate chunk.  First disable direct-mmapping so
  4097        malloc won't use it, since we would not be able to later
  4098        free/realloc space internal to a segregated mmap region.
  4099      */
  4100     was_enabled = use_mmap(m);
  4101     disable_mmap(m);
  4102     mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  4103     if (was_enabled)
  4104         enable_mmap(m);
  4105     if (mem == 0)
  4106         return 0;
  4107 
  4108     if (PREACTION(m))
  4109         return 0;
  4110     p = mem2chunk(mem);
  4111     remainder_size = chunksize(p);
  4112 
  4113     assert(!is_mmapped(p));
  4114 
  4115     if (opts & 0x2) {           /* optionally clear the elements */
  4116         memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  4117     }
  4118 
  4119     /* If not provided, allocate the pointer array as final part of chunk */
  4120     if (marray == 0) {
  4121         size_t array_chunk_size;
  4122         array_chunk = chunk_plus_offset(p, contents_size);
  4123         array_chunk_size = remainder_size - contents_size;
  4124         marray = (void **) (chunk2mem(array_chunk));
  4125         set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
  4126         remainder_size = contents_size;
  4127     }
  4128 
  4129     /* split out elements */
  4130     for (i = 0;; ++i) {
  4131         marray[i] = chunk2mem(p);
  4132         if (i != n_elements - 1) {
  4133             if (element_size != 0)
  4134                 size = element_size;
  4135             else
  4136                 size = request2size(sizes[i]);
  4137             remainder_size -= size;
  4138             set_size_and_pinuse_of_inuse_chunk(m, p, size);
  4139             p = chunk_plus_offset(p, size);
  4140         } else {                /* the final element absorbs any overallocation slop */
  4141             set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
  4142             break;
  4143         }
  4144     }
  4145 
  4146 #if DEBUG
  4147     if (marray != chunks) {
  4148         /* final element must have exactly exhausted chunk */
  4149         if (element_size != 0) {
  4150             assert(remainder_size == element_size);
  4151         } else {
  4152             assert(remainder_size == request2size(sizes[i]));
  4153         }
  4154         check_inuse_chunk(m, mem2chunk(marray));
  4155     }
  4156     for (i = 0; i != n_elements; ++i)
  4157         check_inuse_chunk(m, mem2chunk(marray[i]));
  4158 
  4159 #endif /* DEBUG */
  4160 
  4161     POSTACTION(m);
  4162     return marray;
  4163 }
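
/*
  Hedged usage sketch for the independent_X entry points built on ialloc
  (the struct and field names below are illustrative only):

    struct widget { double *weights; int *flags; };

    // allocate two per-widget arrays carved from a single underlying
    // chunk into independently freeable pieces
    int make_widget(struct widget *w, size_t n)
    {
        size_t sizes[2];
        void *parts[2];
        sizes[0] = n * sizeof(double);
        sizes[1] = n * sizeof(int);
        if (dlindependent_comalloc(2, sizes, parts) == 0)
            return -1;
        w->weights = (double *) parts[0];
        w->flags = (int *) parts[1];
        return 0;
    }

  Each returned element may later be passed to dlfree() individually.
  opts bit 0 (all elements the same size) and bit 1 (zero the elements)
  select the variations handled above; dlindependent_calloc passes both.
*/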
  4164 
  4165 
  4166 /* -------------------------- public routines ---------------------------- */
  4167 
  4168 #if !ONLY_MSPACES
  4169 
  4170 void *
  4171 dlmalloc(size_t bytes)
  4172 {
  4173     /*
  4174        Basic algorithm:
  4175        If a small request (< 256 bytes minus per-chunk overhead):
  4176        1. If one exists, use a remainderless chunk in associated smallbin.
  4177        (Remainderless means that there are too few excess bytes to
  4178        represent as a chunk.)
  4179        2. If it is big enough, use the dv chunk, which is normally the
  4180        chunk adjacent to the one used for the most recent small request.
  4181        3. If one exists, split the smallest available chunk in a bin,
  4182        saving remainder in dv.
  4183        4. If it is big enough, use the top chunk.
  4184        5. If available, get memory from system and use it
  4185        Otherwise, for a large request:
  4186        1. Find the smallest available binned chunk that fits, and use it
  4187        if it is better fitting than dv chunk, splitting if necessary.
  4188        2. If better fitting than any binned chunk, use the dv chunk.
  4189        3. If it is big enough, use the top chunk.
  4190        4. If request size >= mmap threshold, try to directly mmap this chunk.
  4191        5. If available, get memory from system and use it
  4192 
   4193        The ugly gotos here ensure that postaction occurs along all paths.
  4194      */
  4195 
  4196     if (!PREACTION(gm)) {
  4197         void *mem;
  4198         size_t nb;
  4199         if (bytes <= MAX_SMALL_REQUEST) {
  4200             bindex_t idx;
  4201             binmap_t smallbits;
  4202             nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
  4203             idx = small_index(nb);
  4204             smallbits = gm->smallmap >> idx;
  4205 
  4206             if ((smallbits & 0x3U) != 0) {      /* Remainderless fit to a smallbin. */
  4207                 mchunkptr b, p;
  4208                 idx += ~smallbits & 1;  /* Uses next bin if idx empty */
  4209                 b = smallbin_at(gm, idx);
  4210                 p = b->fd;
  4211                 assert(chunksize(p) == small_index2size(idx));
  4212                 unlink_first_small_chunk(gm, b, p, idx);
  4213                 set_inuse_and_pinuse(gm, p, small_index2size(idx));
  4214                 mem = chunk2mem(p);
  4215                 check_malloced_chunk(gm, mem, nb);
  4216                 goto postaction;
  4217             }
  4218 
  4219             else if (nb > gm->dvsize) {
  4220                 if (smallbits != 0) {   /* Use chunk in next nonempty smallbin */
  4221                     mchunkptr b, p, r;
  4222                     size_t rsize;
  4223                     bindex_t i;
  4224                     binmap_t leftbits =
  4225                         (smallbits << idx) & left_bits(idx2bit(idx));
  4226                     binmap_t leastbit = least_bit(leftbits);
  4227                     compute_bit2idx(leastbit, i);
  4228                     b = smallbin_at(gm, i);
  4229                     p = b->fd;
  4230                     assert(chunksize(p) == small_index2size(i));
  4231                     unlink_first_small_chunk(gm, b, p, i);
  4232                     rsize = small_index2size(i) - nb;
  4233                     /* Fit here cannot be remainderless if 4byte sizes */
  4234                     if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4235                         set_inuse_and_pinuse(gm, p, small_index2size(i));
  4236                     else {
  4237                         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4238                         r = chunk_plus_offset(p, nb);
  4239                         set_size_and_pinuse_of_free_chunk(r, rsize);
  4240                         replace_dv(gm, r, rsize);
  4241                     }
  4242                     mem = chunk2mem(p);
  4243                     check_malloced_chunk(gm, mem, nb);
  4244                     goto postaction;
  4245                 }
  4246 
  4247                 else if (gm->treemap != 0
  4248                          && (mem = tmalloc_small(gm, nb)) != 0) {
  4249                     check_malloced_chunk(gm, mem, nb);
  4250                     goto postaction;
  4251                 }
  4252             }
  4253         } else if (bytes >= MAX_REQUEST)
  4254             nb = MAX_SIZE_T;    /* Too big to allocate. Force failure (in sys alloc) */
  4255         else {
  4256             nb = pad_request(bytes);
  4257             if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  4258                 check_malloced_chunk(gm, mem, nb);
  4259                 goto postaction;
  4260             }
  4261         }
  4262 
  4263         if (nb <= gm->dvsize) {
  4264             size_t rsize = gm->dvsize - nb;
  4265             mchunkptr p = gm->dv;
  4266             if (rsize >= MIN_CHUNK_SIZE) {      /* split dv */
  4267                 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
  4268                 gm->dvsize = rsize;
  4269                 set_size_and_pinuse_of_free_chunk(r, rsize);
  4270                 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4271             } else {            /* exhaust dv */
  4272                 size_t dvs = gm->dvsize;
  4273                 gm->dvsize = 0;
  4274                 gm->dv = 0;
  4275                 set_inuse_and_pinuse(gm, p, dvs);
  4276             }
  4277             mem = chunk2mem(p);
  4278             check_malloced_chunk(gm, mem, nb);
  4279             goto postaction;
  4280         }
  4281 
  4282         else if (nb < gm->topsize) {    /* Split top */
  4283             size_t rsize = gm->topsize -= nb;
  4284             mchunkptr p = gm->top;
  4285             mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  4286             r->head = rsize | PINUSE_BIT;
  4287             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4288             mem = chunk2mem(p);
  4289             check_top_chunk(gm, gm->top);
  4290             check_malloced_chunk(gm, mem, nb);
  4291             goto postaction;
  4292         }
  4293 
  4294         mem = sys_alloc(gm, nb);
  4295 
  4296       postaction:
  4297         POSTACTION(gm);
  4298         return mem;
  4299     }
  4300 
  4301     return 0;
  4302 }
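
/*
  Note on the smallbin selection in dlmalloc above (illustrative only):
  'smallbits' is the small-bin occupancy map shifted so that bit 0
  corresponds to the exact-fit bin for nb.  (smallbits & 0x3U) asks
  whether the exact bin or the next one up is non-empty, and
  idx += ~smallbits & 1 bumps the index only when the exact bin is empty;
  the next bin is one granule (8 bytes with the default alignment) larger,
  still too small to split, hence the "remainderless" fit.  A worked
  example with hypothetical maps:

    // idx == 4, bins 5 and 9 occupied: smallmap == 0x220
    // smallbits == 0x220 >> 4 == 0x22; smallbits & 0x3 == 0x2 (non-zero)
    // ~smallbits & 1 == 1, so idx becomes 5 and bin 5's first chunk is used
*/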
  4303 
  4304 void
  4305 dlfree(void *mem)
  4306 {
  4307     /*
   4308        Consolidate freed chunks with preceding or succeeding bordering
  4309        free chunks, if they exist, and then place in a bin.  Intermixed
  4310        with special cases for top, dv, mmapped chunks, and usage errors.
  4311      */
  4312 
  4313     if (mem != 0) {
  4314         mchunkptr p = mem2chunk(mem);
  4315 #if FOOTERS
  4316         mstate fm = get_mstate_for(p);
  4317         if (!ok_magic(fm)) {
  4318             USAGE_ERROR_ACTION(fm, p);
  4319             return;
  4320         }
  4321 #else /* FOOTERS */
  4322 #define fm gm
  4323 #endif /* FOOTERS */
  4324         if (!PREACTION(fm)) {
  4325             check_inuse_chunk(fm, p);
  4326             if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4327                 size_t psize = chunksize(p);
  4328                 mchunkptr next = chunk_plus_offset(p, psize);
  4329                 if (!pinuse(p)) {
  4330                     size_t prevsize = p->prev_foot;
  4331                     if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4332                         prevsize &= ~IS_MMAPPED_BIT;
  4333                         psize += prevsize + MMAP_FOOT_PAD;
  4334                         if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
  4335                             fm->footprint -= psize;
  4336                         goto postaction;
  4337                     } else {
  4338                         mchunkptr prev = chunk_minus_offset(p, prevsize);
  4339                         psize += prevsize;
  4340                         p = prev;
  4341                         if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
  4342                             if (p != fm->dv) {
  4343                                 unlink_chunk(fm, p, prevsize);
  4344                             } else if ((next->head & INUSE_BITS) ==
  4345                                        INUSE_BITS) {
  4346                                 fm->dvsize = psize;
  4347                                 set_free_with_pinuse(p, psize, next);
  4348                                 goto postaction;
  4349                             }
  4350                         } else
  4351                             goto erroraction;
  4352                     }
  4353                 }
  4354 
  4355                 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4356                     if (!cinuse(next)) {        /* consolidate forward */
  4357                         if (next == fm->top) {
  4358                             size_t tsize = fm->topsize += psize;
  4359                             fm->top = p;
  4360                             p->head = tsize | PINUSE_BIT;
  4361                             if (p == fm->dv) {
  4362                                 fm->dv = 0;
  4363                                 fm->dvsize = 0;
  4364                             }
  4365                             if (should_trim(fm, tsize))
  4366                                 sys_trim(fm, 0);
  4367                             goto postaction;
  4368                         } else if (next == fm->dv) {
  4369                             size_t dsize = fm->dvsize += psize;
  4370                             fm->dv = p;
  4371                             set_size_and_pinuse_of_free_chunk(p, dsize);
  4372                             goto postaction;
  4373                         } else {
  4374                             size_t nsize = chunksize(next);
  4375                             psize += nsize;
  4376                             unlink_chunk(fm, next, nsize);
  4377                             set_size_and_pinuse_of_free_chunk(p, psize);
  4378                             if (p == fm->dv) {
  4379                                 fm->dvsize = psize;
  4380                                 goto postaction;
  4381                             }
  4382                         }
  4383                     } else
  4384                         set_free_with_pinuse(p, psize, next);
  4385                     insert_chunk(fm, p, psize);
  4386                     check_free_chunk(fm, p);
  4387                     goto postaction;
  4388                 }
  4389             }
  4390           erroraction:
  4391             USAGE_ERROR_ACTION(fm, p);
  4392           postaction:
  4393             POSTACTION(fm);
  4394         }
  4395     }
  4396 #if !FOOTERS
  4397 #undef fm
  4398 #endif /* FOOTERS */
  4399 }
  4400 
  4401 void *
  4402 dlcalloc(size_t n_elements, size_t elem_size)
  4403 {
  4404     void *mem;
  4405     size_t req = 0;
  4406     if (n_elements != 0) {
  4407         req = n_elements * elem_size;
  4408         if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
  4409             (req / n_elements != elem_size))
  4410             req = MAX_SIZE_T;   /* force downstream failure on overflow */
  4411     }
  4412     mem = dlmalloc(req);
  4413     if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4414         memset(mem, 0, req);
  4415     return mem;
  4416 }
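
/*
  Worked example of the overflow guard above, assuming a 32-bit size_t:
  if both factors fit in 16 bits their product cannot wrap, so the
  division is skipped; otherwise the division cross-checks the product.

    size_t n = 0x10000, sz = 0x10001;        // 65536 * 65537
    size_t req = n * sz;                     // wraps to 0x10000
    // (n | sz) has bits above 0xffff set and req / n == 1 != sz,
    // so req is forced to MAX_SIZE_T and dlmalloc fails cleanly.
*/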
  4417 
  4418 void *
  4419 dlrealloc(void *oldmem, size_t bytes)
  4420 {
  4421     if (oldmem == 0)
  4422         return dlmalloc(bytes);
  4423 #ifdef REALLOC_ZERO_BYTES_FREES
  4424     if (bytes == 0) {
  4425         dlfree(oldmem);
  4426         return 0;
  4427     }
  4428 #endif /* REALLOC_ZERO_BYTES_FREES */
  4429     else {
  4430 #if ! FOOTERS
  4431         mstate m = gm;
  4432 #else /* FOOTERS */
  4433         mstate m = get_mstate_for(mem2chunk(oldmem));
  4434         if (!ok_magic(m)) {
  4435             USAGE_ERROR_ACTION(m, oldmem);
  4436             return 0;
  4437         }
  4438 #endif /* FOOTERS */
  4439         return internal_realloc(m, oldmem, bytes);
  4440     }
  4441 }
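
/*
  Hedged usage sketch for dlrealloc (names illustrative).  As implemented
  by internal_realloc above, the old block is shrunk or grown in place
  when possible (including growing into the top chunk) and otherwise
  replaced by a malloc-copy-free, preserving the old contents up to the
  smaller of the old and new sizes:

    int *v = (int *) dlmalloc(16 * sizeof(int));
    if (v != 0) {
        int *bigger = (int *) dlrealloc(v, 64 * sizeof(int));
        if (bigger != 0)
            v = bigger;                      // first 16 ints preserved
        // on failure the old block is untouched and must still be freed
        dlfree(v);
    }
*/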
  4442 
  4443 void *
  4444 dlmemalign(size_t alignment, size_t bytes)
  4445 {
  4446     return internal_memalign(gm, alignment, bytes);
  4447 }
  4448 
  4449 void **
  4450 dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
  4451 {
  4452     size_t sz = elem_size;      /* serves as 1-element array */
  4453     return ialloc(gm, n_elements, &sz, 3, chunks);
  4454 }
  4455 
  4456 void **
  4457 dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
  4458 {
  4459     return ialloc(gm, n_elements, sizes, 0, chunks);
  4460 }
  4461 
  4462 void *
  4463 dlvalloc(size_t bytes)
  4464 {
  4465     size_t pagesz;
  4466     init_mparams();
  4467     pagesz = mparams.page_size;
  4468     return dlmemalign(pagesz, bytes);
  4469 }
  4470 
  4471 void *
  4472 dlpvalloc(size_t bytes)
  4473 {
  4474     size_t pagesz;
  4475     init_mparams();
  4476     pagesz = mparams.page_size;
  4477     return dlmemalign(pagesz,
  4478                       (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
  4479 }
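
/*
  The rounding in dlpvalloc is the usual power-of-two round-up, since
  page_size is always a power of two.  Illustrative figures, assuming a
  4096-byte page:

    size_t pagesz  = 4096;
    size_t bytes   = 5000;
    size_t rounded = (bytes + pagesz - 1) & ~(pagesz - 1);   // 8192
*/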
  4480 
  4481 int
  4482 dlmalloc_trim(size_t pad)
  4483 {
  4484     int result = 0;
  4485     if (!PREACTION(gm)) {
  4486         result = sys_trim(gm, pad);
  4487         POSTACTION(gm);
  4488     }
  4489     return result;
  4490 }
  4491 
  4492 size_t
  4493 dlmalloc_footprint(void)
  4494 {
  4495     return gm->footprint;
  4496 }
  4497 
  4498 size_t
  4499 dlmalloc_max_footprint(void)
  4500 {
  4501     return gm->max_footprint;
  4502 }
  4503 
  4504 #if !NO_MALLINFO
  4505 struct mallinfo
  4506 dlmallinfo(void)
  4507 {
  4508     return internal_mallinfo(gm);
  4509 }
  4510 #endif /* NO_MALLINFO */
  4511 
  4512 void
   4513 dlmalloc_stats(void)
  4514 {
  4515     internal_malloc_stats(gm);
  4516 }
  4517 
  4518 size_t
  4519 dlmalloc_usable_size(void *mem)
  4520 {
  4521     if (mem != 0) {
  4522         mchunkptr p = mem2chunk(mem);
  4523         if (cinuse(p))
  4524             return chunksize(p) - overhead_for(p);
  4525     }
  4526     return 0;
  4527 }
  4528 
  4529 int
  4530 dlmallopt(int param_number, int value)
  4531 {
  4532     return change_mparam(param_number, value);
  4533 }
  4534 
  4535 #endif /* !ONLY_MSPACES */
  4536 
  4537 /* ----------------------------- user mspaces ---------------------------- */
  4538 
  4539 #if MSPACES
  4540 
  4541 static mstate
  4542 init_user_mstate(char *tbase, size_t tsize)
  4543 {
  4544     size_t msize = pad_request(sizeof(struct malloc_state));
  4545     mchunkptr mn;
  4546     mchunkptr msp = align_as_chunk(tbase);
  4547     mstate m = (mstate) (chunk2mem(msp));
  4548     memset(m, 0, msize);
  4549     INITIAL_LOCK(&m->mutex);
  4550     msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
  4551     m->seg.base = m->least_addr = tbase;
  4552     m->seg.size = m->footprint = m->max_footprint = tsize;
  4553     m->magic = mparams.magic;
  4554     m->mflags = mparams.default_mflags;
  4555     disable_contiguous(m);
  4556     init_bins(m);
  4557     mn = next_chunk(mem2chunk(m));
  4558     init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
  4559     check_top_chunk(m, m->top);
  4560     return m;
  4561 }
  4562 
  4563 mspace
  4564 create_mspace(size_t capacity, int locked)
  4565 {
  4566     mstate m = 0;
  4567     size_t msize = pad_request(sizeof(struct malloc_state));
  4568     init_mparams();             /* Ensure pagesize etc initialized */
  4569 
  4570     if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4571         size_t rs = ((capacity == 0) ? mparams.granularity :
  4572                      (capacity + TOP_FOOT_SIZE + msize));
  4573         size_t tsize = granularity_align(rs);
  4574         char *tbase = (char *) (CALL_MMAP(tsize));
  4575         if (tbase != CMFAIL) {
  4576             m = init_user_mstate(tbase, tsize);
  4577             m->seg.sflags = IS_MMAPPED_BIT;
  4578             set_lock(m, locked);
  4579         }
  4580     }
  4581     return (mspace) m;
  4582 }
  4583 
  4584 mspace
  4585 create_mspace_with_base(void *base, size_t capacity, int locked)
  4586 {
  4587     mstate m = 0;
  4588     size_t msize = pad_request(sizeof(struct malloc_state));
  4589     init_mparams();             /* Ensure pagesize etc initialized */
  4590 
  4591     if (capacity > msize + TOP_FOOT_SIZE &&
  4592         capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4593         m = init_user_mstate((char *) base, capacity);
  4594         m->seg.sflags = EXTERN_BIT;
  4595         set_lock(m, locked);
  4596     }
  4597     return (mspace) m;
  4598 }
  4599 
  4600 size_t
  4601 destroy_mspace(mspace msp)
  4602 {
  4603     size_t freed = 0;
  4604     mstate ms = (mstate) msp;
  4605     if (ok_magic(ms)) {
  4606         msegmentptr sp = &ms->seg;
  4607         while (sp != 0) {
  4608             char *base = sp->base;
  4609             size_t size = sp->size;
  4610             flag_t flag = sp->sflags;
  4611             sp = sp->next;
  4612             if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
  4613                 CALL_MUNMAP(base, size) == 0)
  4614                 freed += size;
  4615         }
  4616     } else {
  4617         USAGE_ERROR_ACTION(ms, ms);
  4618     }
  4619     return freed;
  4620 }
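
/*
  Hedged usage sketch for the mspace API (only compiled when MSPACES is
  enabled; the sizes below are illustrative):

    mspace arena = create_mspace(0, 1);      // default capacity, locked
    if (arena != 0) {
        void *buf = mspace_malloc(arena, 1024);
        if (buf != 0) {
            // ... use buf ...
            mspace_free(arena, buf);
        }
        destroy_mspace(arena);               // unmaps all mmapped segments
    }
*/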
  4621 
  4622 /*
  4623   mspace versions of routines are near-clones of the global
  4624   versions. This is not so nice but better than the alternatives.
  4625 */
  4626 
  4627 
  4628 void *
  4629 mspace_malloc(mspace msp, size_t bytes)
  4630 {
  4631     mstate ms = (mstate) msp;
  4632     if (!ok_magic(ms)) {
  4633         USAGE_ERROR_ACTION(ms, ms);
  4634         return 0;
  4635     }
  4636     if (!PREACTION(ms)) {
  4637         void *mem;
  4638         size_t nb;
  4639         if (bytes <= MAX_SMALL_REQUEST) {
  4640             bindex_t idx;
  4641             binmap_t smallbits;
  4642             nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
  4643             idx = small_index(nb);
  4644             smallbits = ms->smallmap >> idx;
  4645 
  4646             if ((smallbits & 0x3U) != 0) {      /* Remainderless fit to a smallbin. */
  4647                 mchunkptr b, p;
  4648                 idx += ~smallbits & 1;  /* Uses next bin if idx empty */
  4649                 b = smallbin_at(ms, idx);
  4650                 p = b->fd;
  4651                 assert(chunksize(p) == small_index2size(idx));
  4652                 unlink_first_small_chunk(ms, b, p, idx);
  4653                 set_inuse_and_pinuse(ms, p, small_index2size(idx));
  4654                 mem = chunk2mem(p);
  4655                 check_malloced_chunk(ms, mem, nb);
  4656                 goto postaction;
  4657             }
  4658 
  4659             else if (nb > ms->dvsize) {
  4660                 if (smallbits != 0) {   /* Use chunk in next nonempty smallbin */
  4661                     mchunkptr b, p, r;
  4662                     size_t rsize;
  4663                     bindex_t i;
  4664                     binmap_t leftbits =
  4665                         (smallbits << idx) & left_bits(idx2bit(idx));
  4666                     binmap_t leastbit = least_bit(leftbits);
  4667                     compute_bit2idx(leastbit, i);
  4668                     b = smallbin_at(ms, i);
  4669                     p = b->fd;
  4670                     assert(chunksize(p) == small_index2size(i));
  4671                     unlink_first_small_chunk(ms, b, p, i);
  4672                     rsize = small_index2size(i) - nb;
  4673                     /* Fit here cannot be remainderless if 4byte sizes */
  4674                     if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4675                         set_inuse_and_pinuse(ms, p, small_index2size(i));
  4676                     else {
  4677                         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4678                         r = chunk_plus_offset(p, nb);
  4679                         set_size_and_pinuse_of_free_chunk(r, rsize);
  4680                         replace_dv(ms, r, rsize);
  4681                     }
  4682                     mem = chunk2mem(p);
  4683                     check_malloced_chunk(ms, mem, nb);
  4684                     goto postaction;
  4685                 }
  4686 
  4687                 else if (ms->treemap != 0
  4688                          && (mem = tmalloc_small(ms, nb)) != 0) {
  4689                     check_malloced_chunk(ms, mem, nb);
  4690                     goto postaction;
  4691                 }
  4692             }
  4693         } else if (bytes >= MAX_REQUEST)
  4694             nb = MAX_SIZE_T;    /* Too big to allocate. Force failure (in sys alloc) */
  4695         else {
  4696             nb = pad_request(bytes);
  4697             if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
  4698                 check_malloced_chunk(ms, mem, nb);
  4699                 goto postaction;
  4700             }
  4701         }
  4702 
  4703         if (nb <= ms->dvsize) {
  4704             size_t rsize = ms->dvsize - nb;
  4705             mchunkptr p = ms->dv;
  4706             if (rsize >= MIN_CHUNK_SIZE) {      /* split dv */
  4707                 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
  4708                 ms->dvsize = rsize;
  4709                 set_size_and_pinuse_of_free_chunk(r, rsize);
  4710                 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4711             } else {            /* exhaust dv */
  4712                 size_t dvs = ms->dvsize;
  4713                 ms->dvsize = 0;
  4714                 ms->dv = 0;
  4715                 set_inuse_and_pinuse(ms, p, dvs);
  4716             }
  4717             mem = chunk2mem(p);
  4718             check_malloced_chunk(ms, mem, nb);
  4719             goto postaction;
  4720         }
  4721 
  4722         else if (nb < ms->topsize) {    /* Split top */
  4723             size_t rsize = ms->topsize -= nb;
  4724             mchunkptr p = ms->top;
  4725             mchunkptr r = ms->top = chunk_plus_offset(p, nb);
  4726             r->head = rsize | PINUSE_BIT;
  4727             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4728             mem = chunk2mem(p);
  4729             check_top_chunk(ms, ms->top);
  4730             check_malloced_chunk(ms, mem, nb);
  4731             goto postaction;
  4732         }
  4733 
  4734         mem = sys_alloc(ms, nb);
  4735 
  4736       postaction:
  4737         POSTACTION(ms);
  4738         return mem;
  4739     }
  4740 
  4741     return 0;
  4742 }
  4743 
  4744 void
  4745 mspace_free(mspace msp, void *mem)
  4746 {
  4747     if (mem != 0) {
  4748         mchunkptr p = mem2chunk(mem);
  4749 #if FOOTERS
  4750         mstate fm = get_mstate_for(p);
  4751 #else /* FOOTERS */
  4752         mstate fm = (mstate) msp;
  4753 #endif /* FOOTERS */
  4754         if (!ok_magic(fm)) {
  4755             USAGE_ERROR_ACTION(fm, p);
  4756             return;
  4757         }
  4758         if (!PREACTION(fm)) {
  4759             check_inuse_chunk(fm, p);
  4760             if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4761                 size_t psize = chunksize(p);
  4762                 mchunkptr next = chunk_plus_offset(p, psize);
  4763                 if (!pinuse(p)) {
  4764                     size_t prevsize = p->prev_foot;
  4765                     if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4766                         prevsize &= ~IS_MMAPPED_BIT;
  4767                         psize += prevsize + MMAP_FOOT_PAD;
  4768                         if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
  4769                             fm->footprint -= psize;
  4770                         goto postaction;
  4771                     } else {
  4772                         mchunkptr prev = chunk_minus_offset(p, prevsize);
  4773                         psize += prevsize;
  4774                         p = prev;
  4775                         if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
  4776                             if (p != fm->dv) {
  4777                                 unlink_chunk(fm, p, prevsize);
  4778                             } else if ((next->head & INUSE_BITS) ==
  4779                                        INUSE_BITS) {
  4780                                 fm->dvsize = psize;
  4781                                 set_free_with_pinuse(p, psize, next);
  4782                                 goto postaction;
  4783                             }
  4784                         } else
  4785                             goto erroraction;
  4786                     }
  4787                 }
  4788 
  4789                 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4790                     if (!cinuse(next)) {        /* consolidate forward */
  4791                         if (next == fm->top) {
  4792                             size_t tsize = fm->topsize += psize;
  4793                             fm->top = p;
  4794                             p->head = tsize | PINUSE_BIT;
  4795                             if (p == fm->dv) {
  4796                                 fm->dv = 0;
  4797                                 fm->dvsize = 0;
  4798                             }
  4799                             if (should_trim(fm, tsize))
  4800                                 sys_trim(fm, 0);
  4801                             goto postaction;
  4802                         } else if (next == fm->dv) {
  4803                             size_t dsize = fm->dvsize += psize;
  4804                             fm->dv = p;
  4805                             set_size_and_pinuse_of_free_chunk(p, dsize);
  4806                             goto postaction;
  4807                         } else {
  4808                             size_t nsize = chunksize(next);
  4809                             psize += nsize;
  4810                             unlink_chunk(fm, next, nsize);
  4811                             set_size_and_pinuse_of_free_chunk(p, psize);
  4812                             if (p == fm->dv) {
  4813                                 fm->dvsize = psize;
  4814                                 goto postaction;
  4815                             }
  4816                         }
  4817                     } else
  4818                         set_free_with_pinuse(p, psize, next);
  4819                     insert_chunk(fm, p, psize);
  4820                     check_free_chunk(fm, p);
  4821                     goto postaction;
  4822                 }
  4823             }
  4824           erroraction:
  4825             USAGE_ERROR_ACTION(fm, p);
  4826           postaction:
  4827             POSTACTION(fm);
  4828         }
  4829     }
  4830 }
  4831 
  4832 void *
  4833 mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
  4834 {
  4835     void *mem;
  4836     size_t req = 0;
  4837     mstate ms = (mstate) msp;
  4838     if (!ok_magic(ms)) {
  4839         USAGE_ERROR_ACTION(ms, ms);
  4840         return 0;
  4841     }
  4842     if (n_elements != 0) {
  4843         req = n_elements * elem_size;
  4844         if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
  4845             (req / n_elements != elem_size))
  4846             req = MAX_SIZE_T;   /* force downstream failure on overflow */
  4847     }
  4848     mem = internal_malloc(ms, req);
  4849     if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4850         memset(mem, 0, req);
  4851     return mem;
  4852 }
  4853 
  4854 void *
  4855 mspace_realloc(mspace msp, void *oldmem, size_t bytes)
  4856 {
  4857     if (oldmem == 0)
  4858         return mspace_malloc(msp, bytes);
  4859 #ifdef REALLOC_ZERO_BYTES_FREES
  4860     if (bytes == 0) {
  4861         mspace_free(msp, oldmem);
  4862         return 0;
  4863     }
  4864 #endif /* REALLOC_ZERO_BYTES_FREES */
  4865     else {
  4866 #if FOOTERS
  4867         mchunkptr p = mem2chunk(oldmem);
  4868         mstate ms = get_mstate_for(p);
  4869 #else /* FOOTERS */
  4870         mstate ms = (mstate) msp;
  4871 #endif /* FOOTERS */
  4872         if (!ok_magic(ms)) {
  4873             USAGE_ERROR_ACTION(ms, ms);
  4874             return 0;
  4875         }
  4876         return internal_realloc(ms, oldmem, bytes);
  4877     }
  4878 }
  4879 
  4880 void *
  4881 mspace_memalign(mspace msp, size_t alignment, size_t bytes)
  4882 {
  4883     mstate ms = (mstate) msp;
  4884     if (!ok_magic(ms)) {
  4885         USAGE_ERROR_ACTION(ms, ms);
  4886         return 0;
  4887     }
  4888     return internal_memalign(ms, alignment, bytes);
  4889 }
  4890 
  4891 void **
  4892 mspace_independent_calloc(mspace msp, size_t n_elements,
  4893                           size_t elem_size, void *chunks[])
  4894 {
  4895     size_t sz = elem_size;      /* serves as 1-element array */
  4896     mstate ms = (mstate) msp;
  4897     if (!ok_magic(ms)) {
  4898         USAGE_ERROR_ACTION(ms, ms);
  4899         return 0;
  4900     }
  4901     return ialloc(ms, n_elements, &sz, 3, chunks);
  4902 }
  4903 
  4904 void **
  4905 mspace_independent_comalloc(mspace msp, size_t n_elements,
  4906                             size_t sizes[], void *chunks[])
  4907 {
  4908     mstate ms = (mstate) msp;
  4909     if (!ok_magic(ms)) {
  4910         USAGE_ERROR_ACTION(ms, ms);
  4911         return 0;
  4912     }
  4913     return ialloc(ms, n_elements, sizes, 0, chunks);
  4914 }
  4915 
  4916 int
  4917 mspace_trim(mspace msp, size_t pad)
  4918 {
  4919     int result = 0;
  4920     mstate ms = (mstate) msp;
  4921     if (ok_magic(ms)) {
  4922         if (!PREACTION(ms)) {
  4923             result = sys_trim(ms, pad);
  4924             POSTACTION(ms);
  4925         }
  4926     } else {
  4927         USAGE_ERROR_ACTION(ms, ms);
  4928     }
  4929     return result;
  4930 }
  4931 
  4932 void
  4933 mspace_malloc_stats(mspace msp)
  4934 {
  4935     mstate ms = (mstate) msp;
  4936     if (ok_magic(ms)) {
  4937         internal_malloc_stats(ms);
  4938     } else {
  4939         USAGE_ERROR_ACTION(ms, ms);
  4940     }
  4941 }
  4942 
  4943 size_t
  4944 mspace_footprint(mspace msp)
  4945 {
   4946     size_t result = 0;
  4947     mstate ms = (mstate) msp;
  4948     if (ok_magic(ms)) {
  4949         result = ms->footprint;
   4950     } else
   4951         USAGE_ERROR_ACTION(ms, ms);
  4952     return result;
  4953 }
  4954 
  4955 
  4956 size_t
  4957 mspace_max_footprint(mspace msp)
  4958 {
   4959     size_t result = 0;
  4960     mstate ms = (mstate) msp;
  4961     if (ok_magic(ms)) {
  4962         result = ms->max_footprint;
   4963     } else
   4964         USAGE_ERROR_ACTION(ms, ms);
  4965     return result;
  4966 }
  4967 
  4968 
  4969 #if !NO_MALLINFO
  4970 struct mallinfo
  4971 mspace_mallinfo(mspace msp)
  4972 {
  4973     mstate ms = (mstate) msp;
  4974     if (!ok_magic(ms)) {
  4975         USAGE_ERROR_ACTION(ms, ms);
  4976     }
  4977     return internal_mallinfo(ms);
  4978 }
  4979 #endif /* NO_MALLINFO */
  4980 
  4981 int
  4982 mspace_mallopt(int param_number, int value)
  4983 {
  4984     return change_mparam(param_number, value);
  4985 }
  4986 
  4987 #endif /* MSPACES */
  4988 
  4989 /* -------------------- Alternative MORECORE functions ------------------- */
  4990 
  4991 /*
  4992   Guidelines for creating a custom version of MORECORE:
  4993 
  4994   * For best performance, MORECORE should allocate in multiples of pagesize.
  4995   * MORECORE may allocate more memory than requested. (Or even less,
  4996       but this will usually result in a malloc failure.)
  4997   * MORECORE must not allocate memory when given argument zero, but
  4998       instead return one past the end address of memory from previous
  4999       nonzero call.
  5000   * For best performance, consecutive calls to MORECORE with positive
  5001       arguments should return increasing addresses, indicating that
  5002       space has been contiguously extended.
  5003   * Even though consecutive calls to MORECORE need not return contiguous
  5004       addresses, it must be OK for malloc'ed chunks to span multiple
  5005       regions in those cases where they do happen to be contiguous.
  5006   * MORECORE need not handle negative arguments -- it may instead
  5007       just return MFAIL when given negative arguments.
  5008       Negative arguments are always multiples of pagesize. MORECORE
  5009       must not misinterpret negative args as large positive unsigned
  5010       args. You can suppress all such calls from even occurring by defining
   5011       MORECORE_CANNOT_TRIM.
  5012 
  5013   As an example alternative MORECORE, here is a custom allocator
  5014   kindly contributed for pre-OSX macOS.  It uses virtually but not
  5015   necessarily physically contiguous non-paged memory (locked in,
  5016   present and won't get swapped out).  You can use it by uncommenting
  5017   this section, adding some #includes, and setting up the appropriate
  5018   defines above:
  5019 
  5020       #define MORECORE osMoreCore
  5021 
  5022   There is also a shutdown routine that should somehow be called for
  5023   cleanup upon program exit.
  5024 
  5025   #define MAX_POOL_ENTRIES 100
  5026   #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
  5027   static int next_os_pool;
  5028   void *our_os_pools[MAX_POOL_ENTRIES];
  5029 
  5030   void *osMoreCore(int size)
  5031   {
  5032     void *ptr = 0;
  5033     static void *sbrk_top = 0;
  5034 
  5035     if (size > 0)
  5036     {
  5037       if (size < MINIMUM_MORECORE_SIZE)
  5038          size = MINIMUM_MORECORE_SIZE;
  5039       if (CurrentExecutionLevel() == kTaskLevel)
  5040          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
  5041       if (ptr == 0)
  5042       {
  5043         return (void *) MFAIL;
  5044       }
  5045       // save ptrs so they can be freed during cleanup
  5046       our_os_pools[next_os_pool] = ptr;
  5047       next_os_pool++;
  5048       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
  5049       sbrk_top = (char *) ptr + size;
  5050       return ptr;
  5051     }
  5052     else if (size < 0)
  5053     {
  5054       // we don't currently support shrink behavior
  5055       return (void *) MFAIL;
  5056     }
  5057     else
  5058     {
  5059       return sbrk_top;
  5060     }
  5061   }
  5062 
  5063   // cleanup any allocated memory pools
  5064   // called as last thing before shutting down driver
  5065 
  5066   void osCleanupMem(void)
  5067   {
  5068     void **ptr;
  5069 
  5070     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
  5071       if (*ptr)
  5072       {
  5073          PoolDeallocate(*ptr);
  5074          *ptr = 0;
  5075       }
  5076   }
  5077 
  5078 */
  5079 
  5080 
  5081 /* -----------------------------------------------------------------------
  5082 History:
  5083     V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
  5084       * Add max_footprint functions
  5085       * Ensure all appropriate literals are size_t
  5086       * Fix conditional compilation problem for some #define settings
  5087       * Avoid concatenating segments with the one provided
  5088         in create_mspace_with_base
  5089       * Rename some variables to avoid compiler shadowing warnings
  5090       * Use explicit lock initialization.
  5091       * Better handling of sbrk interference.
  5092       * Simplify and fix segment insertion, trimming and mspace_destroy
  5093       * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
  5094       * Thanks especially to Dennis Flanagan for help on these.
  5095 
  5096     V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
  5097       * Fix memalign brace error.
  5098 
  5099     V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
  5100       * Fix improper #endif nesting in C++
  5101       * Add explicit casts needed for C++
  5102 
  5103     V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
  5104       * Use trees for large bins
  5105       * Support mspaces
  5106       * Use segments to unify sbrk-based and mmap-based system allocation,
  5107         removing need for emulation on most platforms without sbrk.
  5108       * Default safety checks
  5109       * Optional footer checks. Thanks to William Robertson for the idea.
  5110       * Internal code refactoring
  5111       * Incorporate suggestions and platform-specific changes.
  5112         Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
  5113         Aaron Bachmann,  Emery Berger, and others.
  5114       * Speed up non-fastbin processing enough to remove fastbins.
  5115       * Remove useless cfree() to avoid conflicts with other apps.
  5116       * Remove internal memcpy, memset. Compilers handle builtins better.
  5117       * Remove some options that no one ever used and rename others.
  5118 
  5119     V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
  5120       * Fix malloc_state bitmap array misdeclaration
  5121 
  5122     V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
  5123       * Allow tuning of FIRST_SORTED_BIN_SIZE
  5124       * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
  5125       * Better detection and support for non-contiguousness of MORECORE.
  5126         Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
  5127       * Bypass most of malloc if no frees. Thanks To Emery Berger.
   5128       * Fix freeing of old top non-contiguous chunk in sysmalloc.
  5129       * Raised default trim and map thresholds to 256K.
  5130       * Fix mmap-related #defines. Thanks to Lubos Lunak.
  5131       * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
  5132       * Branch-free bin calculation
  5133       * Default trim and mmap thresholds now 256K.
  5134 
  5135     V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
  5136       * Introduce independent_comalloc and independent_calloc.
  5137         Thanks to Michael Pachos for motivation and help.
  5138       * Make optional .h file available
  5139       * Allow > 2GB requests on 32bit systems.
  5140       * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
  5141         Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
  5142         and Anonymous.
  5143       * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
  5144         helping test this.)
  5145       * memalign: check alignment arg
  5146       * realloc: don't try to shift chunks backwards, since this
  5147         leads to  more fragmentation in some programs and doesn't
  5148         seem to help in any others.
  5149       * Collect all cases in malloc requiring system memory into sysmalloc
  5150       * Use mmap as backup to sbrk
  5151       * Place all internal state in malloc_state
  5152       * Introduce fastbins (although similar to 2.5.1)
  5153       * Many minor tunings and cosmetic improvements
  5154       * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
  5155       * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
  5156         Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
  5157       * Include errno.h to support default failure action.
  5158 
  5159     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
  5160       * return null for negative arguments
  5161       * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
  5162          * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
  5163           (e.g. WIN32 platforms)
  5164          * Cleanup header file inclusion for WIN32 platforms
  5165          * Cleanup code to avoid Microsoft Visual C++ compiler complaints
  5166          * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
  5167            memory allocation routines
  5168          * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
  5169          * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
  5170            usage of 'assert' in non-WIN32 code
  5171          * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
  5172            avoid infinite loop
  5173       * Always call 'fREe()' rather than 'free()'
  5174 
  5175     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
  5176       * Fixed ordering problem with boundary-stamping
  5177 
  5178     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
  5179       * Added pvalloc, as recommended by H.J. Liu
  5180       * Added 64bit pointer support mainly from Wolfram Gloger
  5181       * Added anonymously donated WIN32 sbrk emulation
  5182       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
  5183       * malloc_extend_top: fix mask error that caused wastage after
  5184         foreign sbrks
  5185       * Add linux mremap support code from HJ Liu
  5186 
  5187     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
  5188       * Integrated most documentation with the code.
  5189       * Add support for mmap, with help from
  5190         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5191       * Use last_remainder in more cases.
  5192       * Pack bins using idea from  colin@nyx10.cs.du.edu
   5193       * Use ordered bins instead of best-fit threshold
  5194       * Eliminate block-local decls to simplify tracing and debugging.
  5195       * Support another case of realloc via move into top
   5196       * Fix error occurring when initial sbrk_base not word-aligned.
  5197       * Rely on page size for units instead of SBRK_UNIT to
  5198         avoid surprises about sbrk alignment conventions.
  5199       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
  5200         (raymond@es.ele.tue.nl) for the suggestion.
  5201       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
  5202       * More precautions for cases where other routines call sbrk,
  5203         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5204       * Added macros etc., allowing use in linux libc from
  5205         H.J. Lu (hjl@gnu.ai.mit.edu)
  5206       * Inverted this history list
  5207 
  5208     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
  5209       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
  5210       * Removed all preallocation code since under current scheme
  5211         the work required to undo bad preallocations exceeds
  5212         the work saved in good cases for most test programs.
  5213       * No longer use return list or unconsolidated bins since
  5214         no scheme using them consistently outperforms those that don't
  5215         given above changes.
  5216       * Use best fit for very large chunks to prevent some worst-cases.
  5217       * Added some support for debugging
  5218 
  5219     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
  5220       * Removed footers when chunks are in use. Thanks to
  5221         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
  5222 
  5223     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
  5224       * Added malloc_trim, with help from Wolfram Gloger
  5225         (wmglo@Dent.MED.Uni-Muenchen.DE).
  5226 
  5227     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
  5228 
  5229     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
  5230       * realloc: try to expand in both directions
  5231       * malloc: swap order of clean-bin strategy;
  5232       * realloc: only conditionally expand backwards
  5233       * Try not to scavenge used bins
  5234       * Use bin counts as a guide to preallocation
  5235       * Occasionally bin return list chunks in first scan
  5236       * Add a few optimizations from colin@nyx10.cs.du.edu
  5237 
  5238     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
  5239       * faster bin computation & slightly different binning
  5240       * merged all consolidations to one part of malloc proper
  5241          (eliminating old malloc_find_space & malloc_clean_bin)
  5242       * Scan 2 returns chunks (not just 1)
  5243       * Propagate failure in realloc if malloc returns 0
  5244       * Add stuff to allow compilation on non-ANSI compilers
  5245           from kpv@research.att.com
  5246 
  5247     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
  5248       * removed potential for odd address access in prev_chunk
  5249       * removed dependency on getpagesize.h
  5250       * misc cosmetics and a bit more internal documentation
  5251       * anticosmetics: mangled names in macros to evade debugger strangeness
  5252       * tested on sparc, hp-700, dec-mips, rs6000
  5253           with gcc & native cc (hp, dec only) allowing
  5254           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
  5255 
  5256     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
  5257       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
  5258          structure of old version,  but most details differ.)
  5259 
  5260 */
  5261 
  5262 #endif /* !HAVE_MALLOC */
  5263 
  5264 /* vi: set ts=4 sw=4 expandtab: */