src/stdlib/SDL_malloc.c
     1 /*
     2     SDL - Simple DirectMedia Layer
     3     Copyright (C) 1997-2012 Sam Lantinga
     4 
     5     This library is free software; you can redistribute it and/or
     6     modify it under the terms of the GNU Lesser General Public
     7     License as published by the Free Software Foundation; either
     8     version 2.1 of the License, or (at your option) any later version.
     9 
    10     This library is distributed in the hope that it will be useful,
    11     but WITHOUT ANY WARRANTY; without even the implied warranty of
    12     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    13     Lesser General Public License for more details.
    14 
    15     You should have received a copy of the GNU Lesser General Public
    16     License along with this library; if not, write to the Free Software
    17     Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    18 
    19     Sam Lantinga
    20     slouken@libsdl.org
    21 */
    22 #include "SDL_config.h"
    23 
    24 /* This file contains portable memory management functions for SDL */
    25 
    26 #include "SDL_stdinc.h"
    27 
    28 #ifndef HAVE_MALLOC
    29 
    30 #define LACKS_SYS_TYPES_H
    31 #define LACKS_STDIO_H
    32 #define LACKS_STRINGS_H
    33 #define LACKS_STRING_H
    34 #define LACKS_STDLIB_H
    35 #define ABORT
    36 
    37 /*
    38   This is a version (aka dlmalloc) of malloc/free/realloc written by
    39   Doug Lea and released to the public domain, as explained at
    40   http://creativecommons.org/licenses/publicdomain.  Send questions,
    41   comments, complaints, performance data, etc to dl@cs.oswego.edu
    42 
    43 * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
    44 
    45    Note: There may be an updated version of this malloc obtainable at
    46            ftp://gee.cs.oswego.edu/pub/misc/malloc.c
    47          Check before installing!
    48 
    49 * Quickstart
    50 
    51   This library is all in one file to simplify the most common usage:
    52   ftp it, compile it (-O3), and link it into another program. All of
    53   the compile-time options default to reasonable values for use on
    54   most platforms.  You might later want to step through various
    55   compile-time and dynamic tuning options.
    56 
    57   For convenience, an include file for code using this malloc is at:
    58      ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
    59   You don't really need this .h file unless you call functions not
    60   defined in your system include files.  The .h file contains only the
    61   excerpts from this file needed for using this malloc on ANSI C/C++
    62   systems, so long as you haven't changed compile-time options about
    63   naming and tuning parameters.  If you do, then you can create your
    64   own malloc.h that does include all settings by cutting at the point
    65   indicated below. Note that you may already by default be using a C
    66   library containing a malloc that is based on some version of this
    67   malloc (for example in linux). You might still want to use the one
    68   in this file to customize settings or to avoid overheads associated
    69   with library versions.
    70 
    71 * Vital statistics:
    72 
    73   Supported pointer/size_t representation:       4 or 8 bytes
    74        size_t MUST be an unsigned type of the same width as
    75        pointers. (If you are using an ancient system that declares
    76        size_t as a signed type, or need it to be a different width
    77        than pointers, you can use a previous release of this malloc
    78        (e.g. 2.7.2) supporting these.)
    79 
    80   Alignment:                                     8 bytes (default)
    81        This suffices for nearly all current machines and C compilers.
    82        However, you can define MALLOC_ALIGNMENT to be wider than this
    83        if necessary (up to 128bytes), at the expense of using more space.
    84 
    85   Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
    86                                           8 or 16 bytes (if 8byte sizes)
    87        Each malloced chunk has a hidden word of overhead holding size
    88        and status information, and additional cross-check word
    89        if FOOTERS is defined.
    90 
    91   Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
    92                           8-byte ptrs:  32 bytes    (including overhead)
    93 
    94        Even a request for zero bytes (i.e., malloc(0)) returns a
    95        pointer to something of the minimum allocatable size.
    96        The maximum overhead wastage (i.e., the number of extra bytes
    97        allocated beyond what was requested in malloc) is less than or equal
    98        to the minimum size, except for requests >= mmap_threshold that
    99        are serviced via mmap(), where the worst case wastage is about
   100        32 bytes plus the remainder from a system page (the minimal
   101        mmap unit); typically 4096 or 8192 bytes.
   102 
   103   Security: static-safe; optionally more or less
   104        The "security" of malloc refers to the ability of malicious
   105        code to accentuate the effects of errors (for example, freeing
   106        space that is not currently malloc'ed or overwriting past the
   107        ends of chunks) in code that calls malloc.  This malloc
   108        guarantees not to modify any memory locations below the base of
   109        heap, i.e., static variables, even in the presence of usage
   110        errors.  The routines additionally detect most improper frees
   111        and reallocs.  All this holds as long as the static bookkeeping
   112        for malloc itself is not corrupted by some other means.  This
   113        is only one aspect of security -- these checks do not, and
   114        cannot, detect all possible programming errors.
   115 
   116        If FOOTERS is defined nonzero, then each allocated chunk
   117        carries an additional check word to verify that it was malloced
   118        from its space.  These check words are the same within each
   119        execution of a program using malloc, but differ across
   120        executions, so externally crafted fake chunks cannot be
   121        freed. This improves security by rejecting frees/reallocs that
   122        could corrupt heap memory, in addition to the checks preventing
   123        writes to statics that are always on.  This may further improve
   124        security at the expense of time and space overhead.  (Note that
   125        FOOTERS may also be worth using with MSPACES.)
   126 
   127        By default detected errors cause the program to abort (calling
   128        "abort()"). You can override this to instead proceed past
   129        errors by defining PROCEED_ON_ERROR.  In this case, a bad free
   130        has no effect, and a malloc that encounters a bad address
   131        caused by user overwrites will ignore the bad address by
   132        dropping pointers and indices to all known memory. This may
   133        be appropriate for programs that should continue if at all
   134        possible in the face of programming errors, although they may
   135        run out of memory because dropped memory is never reclaimed.
   136 
   137        If you don't like either of these options, you can define
   138        CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
   139   else. And if you are sure that your program using malloc has
   140        no errors or vulnerabilities, you can define INSECURE to 1,
   141        which might (or might not) provide a small performance improvement.
   142 
   143   Thread-safety: NOT thread-safe unless USE_LOCKS defined
   144        When USE_LOCKS is defined, each public call to malloc, free,
   145        etc is surrounded with either a pthread mutex or a win32
   146        spinlock (depending on WIN32). This is not especially fast, and
   147        can be a major bottleneck.  It is designed only to provide
   148        minimal protection in concurrent environments, and to provide a
   149        basis for extensions.  If you are using malloc in a concurrent
   150        program, consider instead using ptmalloc, which is derived from
   151        a version of this malloc. (See http://www.malloc.de).
   152 
   153   System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
   154        This malloc can use unix sbrk or any emulation (invoked using
   155        the CALL_MORECORE macro) and/or mmap/munmap or any emulation
   156        (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
   157        memory.  On most unix systems, it tends to work best if both
   158        MORECORE and MMAP are enabled.  On Win32, it uses emulations
   159        based on VirtualAlloc. It also uses common C library functions
   160        like memset.
   161 
   162   Compliance: I believe it is compliant with the Single Unix Specification
   163        (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
   164        others as well.
   165 
   166 * Overview of algorithms
   167 
   168   This is not the fastest, most space-conserving, most portable, or
   169   most tunable malloc ever written. However it is among the fastest
   170   while also being among the most space-conserving, portable and
   171   tunable.  Consistent balance across these factors results in a good
   172   general-purpose allocator for malloc-intensive programs.
   173 
   174   In most ways, this malloc is a best-fit allocator. Generally, it
   175   chooses the best-fitting existing chunk for a request, with ties
   176   broken in approximately least-recently-used order. (This strategy
   177   normally maintains low fragmentation.) However, for requests less
   178   than 256bytes, it deviates from best-fit when there is not an
   179   exactly fitting available chunk by preferring to use space adjacent
   180   to that used for the previous small request, as well as by breaking
   181   ties in approximately most-recently-used order. (These enhance
   182   locality of series of small allocations.)  And for very large requests
   183   (>= 256Kb by default), it relies on system memory mapping
   184   facilities, if supported.  (This helps avoid carrying around and
   185   possibly fragmenting memory used only for large chunks.)
   186 
   187   All operations (except malloc_stats and mallinfo) have execution
   188   times that are bounded by a constant factor of the number of bits in
   189   a size_t, not counting any clearing in calloc or copying in realloc,
   190   or actions surrounding MORECORE and MMAP that have times
   191   proportional to the number of non-contiguous regions returned by
   192   system allocation routines, which is often just 1.
   193 
   194   The implementation is not very modular and seriously overuses
   195   macros. Perhaps someday all C compilers will do as good a job
   196   inlining modular code as can now be done by brute-force expansion,
   197   but now, enough of them seem not to.
   198 
   199   Some compilers issue a lot of warnings about code that is
   200   dead/unreachable only on some platforms, and also about intentional
   201   uses of negation on unsigned types. All known cases of each can be
   202   ignored.
   203 
   204   For a longer but out of date high-level description, see
   205      http://gee.cs.oswego.edu/dl/html/malloc.html
   206 
   207 * MSPACES
   208   If MSPACES is defined, then in addition to malloc, free, etc.,
   209   this file also defines mspace_malloc, mspace_free, etc. These
   210   are versions of malloc routines that take an "mspace" argument
   211   obtained using create_mspace, to control all internal bookkeeping.
   212   If ONLY_MSPACES is defined, only these versions are compiled.
   213   So if you would like to use this allocator for only some allocations,
   214   and your system malloc for others, you can compile with
   215   ONLY_MSPACES and then do something like...
   216     static mspace mymspace = create_mspace(0,0); // for example
   217     #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
   218 
   219   (Note: If you only need one instance of an mspace, you can instead
   220   use "USE_DL_PREFIX" to relabel the global malloc.)
   221 
   222   You can similarly create thread-local allocators by storing
   223   mspaces as thread-locals. For example:
   224     static __thread mspace tlms = 0;
   225     void*  tlmalloc(size_t bytes) {
   226       if (tlms == 0) tlms = create_mspace(0, 0);
   227       return mspace_malloc(tlms, bytes);
   228     }
   229     void  tlfree(void* mem) { mspace_free(tlms, mem); }
   230 
   231   Unless FOOTERS is defined, each mspace is completely independent.
   232   You cannot allocate from one and free to another (although
   233   conformance is only weakly checked, so usage errors are not always
   234   caught). If FOOTERS is defined, then each chunk carries around a tag
   235   indicating its originating mspace, and frees are directed to their
   236   originating spaces.
   237 
   238  -------------------------  Compile-time options ---------------------------
   239 
   240 Be careful in setting #define values for numerical constants of type
   241 size_t. On some systems, literal values are not automatically extended
   242 to size_t precision unless they are explicitly casted.
   243 
   244 WIN32                    default: defined if _WIN32 defined
   245   Defining WIN32 sets up defaults for MS environment and compilers.
   246   Otherwise defaults are for unix.
   247 
   248 MALLOC_ALIGNMENT         default: (size_t)8
   249   Controls the minimum alignment for malloc'ed chunks.  It must be a
   250   power of two and at least 8, even on machines for which smaller
   251   alignments would suffice. It may be defined as larger than this
   252   though. Note however that code and data structures are optimized for
   253   the case of 8-byte alignment.
   254 
   255 MSPACES                  default: 0 (false)
   256   If true, compile in support for independent allocation spaces.
   257   This is only supported if HAVE_MMAP is true.
   258 
   259 ONLY_MSPACES             default: 0 (false)
   260   If true, only compile in mspace versions, not regular versions.
   261 
   262 USE_LOCKS                default: 0 (false)
   263   Causes each call to each public routine to be surrounded with
   264   pthread or WIN32 mutex lock/unlock. (If set true, this can be
   265   overridden on a per-mspace basis for mspace versions.)
   266 
   267 FOOTERS                  default: 0
   268   If true, provide extra checking and dispatching by placing
   269   information in the footers of allocated chunks. This adds
   270   space and time overhead.
   271 
   272 INSECURE                 default: 0
   273   If true, omit checks for usage errors and heap space overwrites.
   274 
   275 USE_DL_PREFIX            default: NOT defined
   276   Causes compiler to prefix all public routines with the string 'dl'.
   277   This can be useful when you only want to use this malloc in one part
   278   of a program, using your regular system malloc elsewhere.
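
  As an illustrative sketch (not how this SDL build is configured), a
  program compiled with USE_DL_PREFIX would call the prefixed entry
  points directly, leaving the system malloc untouched:

    void* p = dlmalloc(64);
    p = dlrealloc(p, 128);
    dlfree(p);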
   279 
   280 ABORT                    default: defined as abort()
   281   Defines how to abort on failed checks.  On most systems, a failed
   282   check cannot die with an "assert" or even print an informative
   283   message, because the underlying print routines in turn call malloc,
   284   which will fail again.  Generally, the best policy is to simply call
   285   abort(). It's not very useful to do more than this because many
   286   errors due to overwriting will show up as address faults (null, odd
   287   addresses etc) rather than malloc-triggered checks, so will also
   288   abort.  Also, most compilers know that abort() does not return, so
   289   can better optimize code conditionally calling it.
   290 
   291 PROCEED_ON_ERROR           default: defined as 0 (false)
   292   Controls whether detected bad addresses cause them to be bypassed
   293   rather than aborting. If set, detected bad arguments to free and
   294   realloc are ignored. And all bookkeeping information is zeroed out
   295   upon a detected overwrite of freed heap space, thus losing the
   296   ability to ever return it from malloc again, but enabling the
   297   application to proceed. If PROCEED_ON_ERROR is defined, the
   298   static variable malloc_corruption_error_count is compiled in
   299   and can be examined to see if errors have occurred. This option
   300   generates slower code than the default abort policy.
   301 
   302 DEBUG                    default: NOT defined
   303   The DEBUG setting is mainly intended for people trying to modify
   304   this code or diagnose problems when porting to new platforms.
   305   However, it may also be able to better isolate user errors than just
   306   using runtime checks.  The assertions in the check routines spell
   307   out in more detail the assumptions and invariants underlying the
   308   algorithms.  The checking is fairly extensive, and will slow down
   309   execution noticeably. Calling malloc_stats or mallinfo with DEBUG
   310   set will attempt to check every non-mmapped allocated and free chunk
   311   in the course of computing the summaries.
   312 
   313 ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
   314   Debugging assertion failures can be nearly impossible if your
   315   version of the assert macro causes malloc to be called, which will
   316   lead to a cascade of further failures, blowing the runtime stack.
   317   ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
   318   which will usually make debugging easier.
   319 
   320 MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
   321   The action to take before "return 0" when malloc fails to
   322   return memory because there is none available.
   323 
   324 HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
   325   True if this system supports sbrk or an emulation of it.
   326 
   327 MORECORE                  default: sbrk
   328   The name of the sbrk-style system routine to call to obtain more
   329   memory.  See below for guidance on writing custom MORECORE
   330   functions. The type of the argument to sbrk/MORECORE varies across
   331   systems.  It cannot be size_t, because it supports negative
   332   arguments, so it is normally the signed type of the same width as
   333   size_t (sometimes declared as "intptr_t").  It doesn't much matter
   334   though. Internally, we only call it with arguments less than half
   335   the max value of a size_t, which should work across all reasonable
   336   possibilities, although sometimes generating compiler warnings.  See
   337   near the end of this file for guidelines for creating a custom
   338   version of MORECORE.
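
  As a minimal illustrative sketch (not part of this malloc; my_morecore
  and arena are hypothetical names), a MORECORE emulation over a fixed
  static region might look like:

    static char arena[1 << 20];             // 1MB backing store
    static size_t arena_top = 0;
    static void* my_morecore(ptrdiff_t increment) {
      if (increment >= 0 &&
          arena_top + (size_t)increment <= sizeof(arena)) {
        void* p = arena + arena_top;        // current break, then advance
        arena_top += (size_t)increment;
        return p;
      }
      return (void*)-1;                     // sbrk-style failure value
    }
    #define MORECORE my_morecore
    #define MORECORE_CANNOT_TRIM            // the sketch never releases memory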
   339 
   340 MORECORE_CONTIGUOUS       default: 1 (true)
   341   If true, take advantage of fact that consecutive calls to MORECORE
   342   with positive arguments always return contiguous increasing
   343   addresses.  This is true of unix sbrk. It does not hurt too much to
   344   set it true anyway, since malloc copes with non-contiguities.
   345   Setting it false when MORECORE is definitely non-contiguous avoids the
   346   time and possibly wasted space malloc would otherwise spend discovering this.
   347 
   348 MORECORE_CANNOT_TRIM      default: NOT defined
   349   True if MORECORE cannot release space back to the system when given
   350   negative arguments. This is generally necessary only if you are
   351   using a hand-crafted MORECORE function that cannot handle negative
   352   arguments.
   353 
   354 HAVE_MMAP                 default: 1 (true)
   355   True if this system supports mmap or an emulation of it.  If so, and
   356   HAVE_MORECORE is not true, MMAP is used for all system
   357   allocation. If set and HAVE_MORECORE is true as well, MMAP is
   358   primarily used to directly allocate very large blocks. It is also
   359   used as a backup strategy in cases where MORECORE fails to provide
   360   space from system. Note: A single call to MUNMAP is assumed to be
   361   able to unmap memory that may have been allocated using multiple calls
   362   to MMAP, so long as they are adjacent.
   363 
   364 HAVE_MREMAP               default: 1 on linux, else 0
   365   If true realloc() uses mremap() to re-allocate large blocks and
   366   extend or shrink allocation spaces.
   367 
   368 MMAP_CLEARS               default: 1 on unix
   369   True if mmap clears memory so calloc doesn't need to. This is true
   370   for standard unix mmap using /dev/zero.
   371 
   372 USE_BUILTIN_FFS            default: 0 (i.e., not used)
   373   Causes malloc to use the builtin ffs() function to compute indices.
   374   Some compilers may recognize and intrinsify ffs to be faster than the
   375   supplied C version. Also, the case of x86 using gcc is special-cased
   376   to an asm instruction, so is already as fast as it can be, and so
   377   this setting has no effect. (On most x86s, the asm version is only
   378   slightly faster than the C version.)
   379 
   380 malloc_getpagesize         default: derive from system includes, or 4096.
   381   The system page size. To the extent possible, this malloc manages
   382   memory from the system in page-size units.  This may be (and
   383   usually is) a function rather than a constant. This is ignored
   384   if WIN32, where page size is determined using GetSystemInfo during
   385   initialization.
   386 
   387 USE_DEV_RANDOM             default: 0 (i.e., not used)
   388   Causes malloc to use /dev/random to initialize secure magic seed for
   389   stamping footers. Otherwise, the current time is used.
   390 
   391 NO_MALLINFO                default: 0
   392   If defined, don't compile "mallinfo". This can be a simple way
   393   of dealing with mismatches between system declarations and
   394   those in this file.
   395 
   396 MALLINFO_FIELD_TYPE        default: size_t
   397   The type of the fields in the mallinfo struct. This was originally
   398   defined as "int" in SVID etc, but is more usefully defined as
   399   size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
   400 
   401 REALLOC_ZERO_BYTES_FREES    default: not defined
   402   This should be set if a call to realloc with zero bytes should
   403   be the same as a call to free. Some people think it should. Otherwise,
   404   because this malloc returns a unique pointer for malloc(0), realloc(p, 0)
   405   does too.
   406 
   407 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
   408 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
   409 LACKS_STDLIB_H                default: NOT defined unless on WIN32
   410   Define these if your system does not have these header files.
   411   You might need to manually insert some of the declarations they provide.
   412 
   413 DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
   414                                 system_info.dwAllocationGranularity in WIN32,
   415                                 otherwise 64K.
   416       Also settable using mallopt(M_GRANULARITY, x)
   417   The unit for allocating and deallocating memory from the system.  On
   418   most systems with contiguous MORECORE, there is no reason to
   419   make this more than a page. However, systems with MMAP tend to
   420   either require or encourage larger granularities.  You can increase
   421   this value to prevent system allocation functions from being called so
   422   often, especially if they are slow.  The value must be at least one
   423   page and must be a power of two.  Setting to 0 causes initialization
   424   to either page size or win32 region size.  (Note: In previous
   425   versions of malloc, the equivalent of this option was called
   426   "TOP_PAD")
   427 
   428 DEFAULT_TRIM_THRESHOLD    default: 2MB
   429       Also settable using mallopt(M_TRIM_THRESHOLD, x)
   430   The maximum amount of unused top-most memory to keep before
   431   releasing via malloc_trim in free().  Automatic trimming is mainly
   432   useful in long-lived programs using contiguous MORECORE.  Because
   433   trimming via sbrk can be slow on some systems, and can sometimes be
   434   wasteful (in cases where programs immediately afterward allocate
   435   more large chunks), the value should be high enough so that your
   436   overall system performance would improve by releasing this much
   437   memory.  As a rough guide, you might set to a value close to the
   438   average size of a process (program) running on your system.
   439   Releasing this much memory would allow such a process to run in
   440   memory.  Generally, it is worth tuning trim thresholds when a
   441   program undergoes phases where several large chunks are allocated
   442   and released in ways that can reuse each other's storage, perhaps
   443   mixed with phases where there are no such chunks at all. The trim
   444   value must be greater than page size to have any useful effect.  To
   445   disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
   446   some people use of mallocing a huge space and then freeing it at
   447   program startup, in an attempt to reserve system memory, doesn't
   448   have the intended effect under automatic trimming, since that memory
   449   will immediately be returned to the system.
   450 
   451 DEFAULT_MMAP_THRESHOLD       default: 256K
   452       Also settable using mallopt(M_MMAP_THRESHOLD, x)
   453   The request size threshold for using MMAP to directly service a
   454   request. Requests of at least this size that cannot be allocated
   455   using already-existing space will be serviced via mmap.  (If enough
   456   normal freed space already exists it is used instead.)  Using mmap
   457   segregates relatively large chunks of memory so that they can be
   458   individually obtained and released from the host system. A request
   459   serviced through mmap is never reused by any other request (at least
   460   not directly; the system may just so happen to remap successive
   461   requests to the same locations).  Segregating space in this way has
   462   the benefits that: Mmapped space can always be individually released
   463   back to the system, which helps keep the system level memory demands
   464   of a long-lived program low.  Also, mapped memory doesn't become
   465   `locked' between other chunks, as can happen with normally allocated
   466   chunks, which means that even trimming via malloc_trim would not
   467   release them.  However, it has the disadvantage that the space
   468   cannot be reclaimed, consolidated, and then used to service later
   469   requests, as happens with normal chunks.  The advantages of mmap
   470   nearly always outweigh disadvantages for "large" chunks, but the
   471   value of "large" may vary across systems.  The default is an
   472   empirically derived value that works well in most systems. You can
   473   disable mmap by setting to MAX_SIZE_T.
   474 
   475 */
   476 
   477 #ifndef WIN32
   478 #ifdef _WIN32
   479 #define WIN32 1
   480 #endif  /* _WIN32 */
   481 #endif  /* WIN32 */
   482 #ifdef WIN32
   483 #define WIN32_LEAN_AND_MEAN
   484 #include <windows.h>
   485 #define HAVE_MMAP 1
   486 #define HAVE_MORECORE 0
   487 #define LACKS_UNISTD_H
   488 #define LACKS_SYS_PARAM_H
   489 #define LACKS_SYS_MMAN_H
   490 #define LACKS_STRING_H
   491 #define LACKS_STRINGS_H
   492 #define LACKS_SYS_TYPES_H
   493 #define LACKS_ERRNO_H
   494 #define LACKS_FCNTL_H 
   495 #define MALLOC_FAILURE_ACTION
   496 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
   497 #endif  /* WIN32 */
   498 
   499 #ifdef __OS2__
   500 #define INCL_DOS
   501 #include <os2.h>
   502 #define HAVE_MMAP 1
   503 #define HAVE_MORECORE 0
   504 #define LACKS_SYS_MMAN_H
   505 #endif  /* __OS2__ */
   506 
   507 #if defined(DARWIN) || defined(_DARWIN)
   508 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
   509 #ifndef HAVE_MORECORE
   510 #define HAVE_MORECORE 0
   511 #define HAVE_MMAP 1
   512 #endif  /* HAVE_MORECORE */
   513 #endif  /* DARWIN */
   514 
   515 #ifndef LACKS_SYS_TYPES_H
   516 #include <sys/types.h>  /* For size_t */
   517 #endif  /* LACKS_SYS_TYPES_H */
   518 
   519 /* The maximum possible size_t value has all bits set */
   520 #define MAX_SIZE_T           (~(size_t)0)
   521 
   522 #ifndef ONLY_MSPACES
   523 #define ONLY_MSPACES 0
   524 #endif  /* ONLY_MSPACES */
   525 #ifndef MSPACES
   526 #if ONLY_MSPACES
   527 #define MSPACES 1
   528 #else   /* ONLY_MSPACES */
   529 #define MSPACES 0
   530 #endif  /* ONLY_MSPACES */
   531 #endif  /* MSPACES */
   532 #ifndef MALLOC_ALIGNMENT
   533 #define MALLOC_ALIGNMENT ((size_t)8U)
   534 #endif  /* MALLOC_ALIGNMENT */
   535 #ifndef FOOTERS
   536 #define FOOTERS 0
   537 #endif  /* FOOTERS */
   538 #ifndef ABORT
   539 #define ABORT  abort()
   540 #endif  /* ABORT */
   541 #ifndef ABORT_ON_ASSERT_FAILURE
   542 #define ABORT_ON_ASSERT_FAILURE 1
   543 #endif  /* ABORT_ON_ASSERT_FAILURE */
   544 #ifndef PROCEED_ON_ERROR
   545 #define PROCEED_ON_ERROR 0
   546 #endif  /* PROCEED_ON_ERROR */
   547 #ifndef USE_LOCKS
   548 #define USE_LOCKS 0
   549 #endif  /* USE_LOCKS */
   550 #ifndef INSECURE
   551 #define INSECURE 0
   552 #endif  /* INSECURE */
   553 #ifndef HAVE_MMAP
   554 #define HAVE_MMAP 1
   555 #endif  /* HAVE_MMAP */
   556 #ifndef MMAP_CLEARS
   557 #define MMAP_CLEARS 1
   558 #endif  /* MMAP_CLEARS */
   559 #ifndef HAVE_MREMAP
   560 #ifdef linux
   561 #define HAVE_MREMAP 1
   562 #else   /* linux */
   563 #define HAVE_MREMAP 0
   564 #endif  /* linux */
   565 #endif  /* HAVE_MREMAP */
   566 #ifndef MALLOC_FAILURE_ACTION
   567 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
   568 #endif  /* MALLOC_FAILURE_ACTION */
   569 #ifndef HAVE_MORECORE
   570 #if ONLY_MSPACES
   571 #define HAVE_MORECORE 0
   572 #else   /* ONLY_MSPACES */
   573 #define HAVE_MORECORE 1
   574 #endif  /* ONLY_MSPACES */
   575 #endif  /* HAVE_MORECORE */
   576 #if !HAVE_MORECORE
   577 #define MORECORE_CONTIGUOUS 0
   578 #else   /* !HAVE_MORECORE */
   579 #ifndef MORECORE
   580 #define MORECORE sbrk
   581 #endif  /* MORECORE */
   582 #ifndef MORECORE_CONTIGUOUS
   583 #define MORECORE_CONTIGUOUS 1
   584 #endif  /* MORECORE_CONTIGUOUS */
   585 #endif  /* HAVE_MORECORE */
   586 #ifndef DEFAULT_GRANULARITY
   587 #if MORECORE_CONTIGUOUS
   588 #define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
   589 #else   /* MORECORE_CONTIGUOUS */
   590 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
   591 #endif  /* MORECORE_CONTIGUOUS */
   592 #endif  /* DEFAULT_GRANULARITY */
   593 #ifndef DEFAULT_TRIM_THRESHOLD
   594 #ifndef MORECORE_CANNOT_TRIM
   595 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
   596 #else   /* MORECORE_CANNOT_TRIM */
   597 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
   598 #endif  /* MORECORE_CANNOT_TRIM */
   599 #endif  /* DEFAULT_TRIM_THRESHOLD */
   600 #ifndef DEFAULT_MMAP_THRESHOLD
   601 #if HAVE_MMAP
   602 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
   603 #else   /* HAVE_MMAP */
   604 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
   605 #endif  /* HAVE_MMAP */
   606 #endif  /* DEFAULT_MMAP_THRESHOLD */
   607 #ifndef USE_BUILTIN_FFS
   608 #define USE_BUILTIN_FFS 0
   609 #endif  /* USE_BUILTIN_FFS */
   610 #ifndef USE_DEV_RANDOM
   611 #define USE_DEV_RANDOM 0
   612 #endif  /* USE_DEV_RANDOM */
   613 #ifndef NO_MALLINFO
   614 #define NO_MALLINFO 0
   615 #endif  /* NO_MALLINFO */
   616 #ifndef MALLINFO_FIELD_TYPE
   617 #define MALLINFO_FIELD_TYPE size_t
   618 #endif  /* MALLINFO_FIELD_TYPE */
   619 
   620 #define memset	SDL_memset
   621 #define memcpy	SDL_memcpy
   622 #define malloc	SDL_malloc
   623 #define calloc	SDL_calloc
   624 #define realloc	SDL_realloc
   625 #define free	SDL_free
   626 
   627 /*
   628   mallopt tuning options.  SVID/XPG defines four standard parameter
   629   numbers for mallopt, normally defined in malloc.h.  None of these
   630   are used in this malloc, so setting them has no effect. But this
   631   malloc does support the following options.
   632 */
   633 
   634 #define M_TRIM_THRESHOLD     (-1)
   635 #define M_GRANULARITY        (-2)
   636 #define M_MMAP_THRESHOLD     (-3)
   637 
   638 /* ------------------------ Mallinfo declarations ------------------------ */
   639 
   640 #if !NO_MALLINFO
   641 /*
   642   This version of malloc supports the standard SVID/XPG mallinfo
   643   routine that returns a struct containing usage properties and
   644   statistics. It should work on any system that has a
   645   /usr/include/malloc.h defining struct mallinfo.  The main
   646   declaration needed is the mallinfo struct that is returned (by-copy)
   647   by mallinfo().  The mallinfo struct contains a bunch of fields that
   648   are not even meaningful in this version of malloc.  These fields are
   649   instead filled by mallinfo() with other numbers that might be of
   650   interest.
   651 
   652   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
   653   /usr/include/malloc.h file that includes a declaration of struct
   654   mallinfo.  If so, it is included; else a compliant version is
   655   declared below.  These must be precisely the same for mallinfo() to
   656   work.  The original SVID version of this struct, defined on most
   657   systems with mallinfo, declares all fields as ints. But some others
   658   define as unsigned long. If your system defines the fields using a
   659   type of different width than listed here, you MUST #include your
   660   system version and #define HAVE_USR_INCLUDE_MALLOC_H.
   661 */
   662 
   663 /* #define HAVE_USR_INCLUDE_MALLOC_H */
   664 
   665 #ifdef HAVE_USR_INCLUDE_MALLOC_H
   666 #include "/usr/include/malloc.h"
   667 #else /* HAVE_USR_INCLUDE_MALLOC_H */
   668 
   669 struct mallinfo {
   670   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
   671   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
   672   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
   673   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
   674   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
   675   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
   676   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
   677   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
   678   MALLINFO_FIELD_TYPE fordblks; /* total free space */
   679   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
   680 };
   681 
   682 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
   683 #endif /* NO_MALLINFO */
   684 
   685 #ifdef __cplusplus
   686 extern "C" {
   687 #endif /* __cplusplus */
   688 
   689 #if !ONLY_MSPACES
   690 
   691 /* ------------------- Declarations of public routines ------------------- */
   692 
   693 #ifndef USE_DL_PREFIX
   694 #define dlcalloc               calloc
   695 #define dlfree                 free
   696 #define dlmalloc               malloc
   697 #define dlmemalign             memalign
   698 #define dlrealloc              realloc
   699 #define dlvalloc               valloc
   700 #define dlpvalloc              pvalloc
   701 #define dlmallinfo             mallinfo
   702 #define dlmallopt              mallopt
   703 #define dlmalloc_trim          malloc_trim
   704 #define dlmalloc_stats         malloc_stats
   705 #define dlmalloc_usable_size   malloc_usable_size
   706 #define dlmalloc_footprint     malloc_footprint
   707 #define dlmalloc_max_footprint malloc_max_footprint
   708 #define dlindependent_calloc   independent_calloc
   709 #define dlindependent_comalloc independent_comalloc
   710 #endif /* USE_DL_PREFIX */
   711 
   712 
   713 /*
   714   malloc(size_t n)
   715   Returns a pointer to a newly allocated chunk of at least n bytes, or
   716   null if no space is available, in which case errno is set to ENOMEM
   717   on ANSI C systems.
   718 
   719   If n is zero, malloc returns a minimum-sized chunk. (The minimum
   720   size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
   721   systems.)  Note that size_t is an unsigned type, so calls with
   722   arguments that would be negative if signed are interpreted as
   723   requests for huge amounts of space, which will often fail. The
   724   maximum supported value of n differs across systems, but is in all
   725   cases less than the maximum representable value of a size_t.
   726 */
   727 void* dlmalloc(size_t);
   728 
   729 /*
   730   free(void* p)
   731   Releases the chunk of memory pointed to by p, that had been previously
   732   allocated using malloc or a related routine such as realloc.
   733   It has no effect if p is null. If p was not malloced or already
   734   freed, free(p) will by default cause the current program to abort.
   735 */
   736 void  dlfree(void*);
   737 
   738 /*
   739   calloc(size_t n_elements, size_t element_size);
   740   Returns a pointer to n_elements * element_size bytes, with all locations
   741   set to zero.
   742 */
   743 void* dlcalloc(size_t, size_t);
   744 
   745 /*
   746   realloc(void* p, size_t n)
   747   Returns a pointer to a chunk of size n that contains the same data
   748   as does chunk p up to the minimum of (n, p's size) bytes, or null
   749   if no space is available.
   750 
   751   The returned pointer may or may not be the same as p. The algorithm
   752   prefers extending p in most cases when possible, otherwise it
   753   employs the equivalent of a malloc-copy-free sequence.
   754 
   755   If p is null, realloc is equivalent to malloc.
   756 
   757   If space is not available, realloc returns null, errno is set (if on
   758   ANSI) and p is NOT freed.
   759 
   760   If n is for fewer bytes than already held by p, the newly unused
   761   space is lopped off and freed if possible.  realloc with a size
   762   argument of zero (re)allocates a minimum-sized chunk.
   763 
   764   The old unix realloc convention of allowing the last-free'd chunk
   765   to be used as an argument to realloc is not supported.
   766 */
   767 
   768 void* dlrealloc(void*, size_t);
   769 
   770 /*
   771   memalign(size_t alignment, size_t n);
   772   Returns a pointer to a newly allocated chunk of n bytes, aligned
   773   in accord with the alignment argument.
   774 
   775   The alignment argument should be a power of two. If the argument is
   776   not a power of two, the nearest greater power is used.
   777   8-byte alignment is guaranteed by normal malloc calls, so don't
   778   bother calling memalign with an argument of 8 or less.
   779 
   780   Overreliance on memalign is a sure way to fragment space.
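
  For example (a hedged sketch; 64 is an arbitrary alignment), a buffer
  aligned on a 64-byte boundary could be obtained and later released with
  the ordinary free routine:

    void* buf = dlmemalign((size_t)64, 1024);
    ...
    dlfree(buf);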
   781 */
   782 void* dlmemalign(size_t, size_t);
   783 
   784 /*
   785   valloc(size_t n);
   786   Equivalent to memalign(pagesize, n), where pagesize is the page
   787   size of the system. If the pagesize is unknown, 4096 is used.
   788 */
   789 void* dlvalloc(size_t);
   790 
   791 /*
   792   mallopt(int parameter_number, int parameter_value)
   793   Sets tunable parameters.  The format is to provide a
   794   (parameter-number, parameter-value) pair.  mallopt then sets the
   795   corresponding parameter to the argument value if it can (i.e., so
   796   long as the value is meaningful), and returns 1 if successful else
   797   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
   798   normally defined in malloc.h.  None of these are used in this malloc,
   799   so setting them has no effect. But this malloc also supports other
   800   options in mallopt. See below for details.  Briefly, supported
   801   parameters are as follows (listed defaults are for "typical"
   802   configurations).
   803 
   804   Symbol            param #  default    allowed param values
   805   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
   806   M_GRANULARITY        -2     page size   any power of 2 >= page size
   807   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
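
  For example (the values are illustrative), a long-running program that
  wants to trim less aggressively and mmap smaller requests might call:

    dlmallopt(M_TRIM_THRESHOLD, 8*1024*1024);  // returns 1 on success, 0 otherwise
    dlmallopt(M_MMAP_THRESHOLD, 128*1024);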
   808 */
   809 int dlmallopt(int, int);
   810 
   811 /*
   812   malloc_footprint();
   813   Returns the number of bytes obtained from the system.  The total
   814   number of bytes allocated by malloc, realloc etc., is less than this
   815   value. Unlike mallinfo, this function returns only a precomputed
   816   result, so can be called frequently to monitor memory consumption.
   817   Even if locks are otherwise defined, this function does not use them,
   818   so results might not be up to date.
   819 */
   820 size_t dlmalloc_footprint(void);
   821 
   822 /*
   823   malloc_max_footprint();
   824   Returns the maximum number of bytes obtained from the system. This
   825   value will be greater than current footprint if deallocated space
   826   has been reclaimed by the system. The peak number of bytes allocated
   827   by malloc, realloc etc., is less than this value. Unlike mallinfo,
   828   this function returns only a precomputed result, so can be called
   829   frequently to monitor memory consumption.  Even if locks are
   830   otherwise defined, this function does not use them, so results might
   831   not be up to date.
   832 */
   833 size_t dlmalloc_max_footprint(void);
   834 
   835 #if !NO_MALLINFO
   836 /*
   837   mallinfo()
   838   Returns (by copy) a struct containing various summary statistics:
   839 
   840   arena:     current total non-mmapped bytes allocated from system
   841   ordblks:   the number of free chunks
   842   smblks:    always zero.
   843   hblks:     current number of mmapped regions
   844   hblkhd:    total bytes held in mmapped regions
   845   usmblks:   the maximum total allocated space. This will be greater
   846                 than current total if trimming has occurred.
   847   fsmblks:   always zero
   848   uordblks:  current total allocated space (normal or mmapped)
   849   fordblks:  total free space
   850   keepcost:  the maximum number of bytes that could ideally be released
   851                back to system via malloc_trim. ("ideally" means that
   852                it ignores page restrictions etc.)
   853 
   854   Because these fields are ints, but internal bookkeeping may
   855   be kept as longs, the reported values may wrap around zero and
   856   thus be inaccurate.
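
  A minimal usage sketch (field meanings as listed above):

    struct mallinfo mi = dlmallinfo();
    size_t in_use = mi.uordblks;       // current total allocated space
    size_t free_bytes = mi.fordblks;   // total free space within the pool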
   857 */
   858 struct mallinfo dlmallinfo(void);
   859 #endif /* NO_MALLINFO */
   860 
   861 /*
   862   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
   863 
   864   independent_calloc is similar to calloc, but instead of returning a
   865   single cleared space, it returns an array of pointers to n_elements
   866   independent elements that can hold contents of size elem_size, each
   867   of which starts out cleared, and can be independently freed,
   868   realloc'ed etc. The elements are guaranteed to be adjacently
   869   allocated (this is not guaranteed to occur with multiple callocs or
   870   mallocs), which may also improve cache locality in some
   871   applications.
   872 
   873   The "chunks" argument is optional (i.e., may be null, which is
   874   probably the most typical usage). If it is null, the returned array
   875   is itself dynamically allocated and should also be freed when it is
   876   no longer needed. Otherwise, the chunks array must be of at least
   877   n_elements in length. It is filled in with the pointers to the
   878   chunks.
   879 
   880   In either case, independent_calloc returns this pointer array, or
   881   null if the allocation failed.  If n_elements is zero and "chunks"
   882   is null, it returns a chunk representing an array with zero elements
   883   (which should be freed if not wanted).
   884 
   885   Each element must be individually freed when it is no longer
   886   needed. If you'd like to instead be able to free all at once, you
   887   should instead use regular calloc and assign pointers into this
   888   space to represent elements.  (In this case though, you cannot
   889   independently free elements.)
   890 
   891   independent_calloc simplifies and speeds up implementations of many
   892   kinds of pools.  It may also be useful when constructing large data
   893   structures that initially have a fixed number of fixed-sized nodes,
   894   but the number is not known at compile time, and some of the nodes
   895   may later need to be freed. For example:
   896 
   897   struct Node { int item; struct Node* next; };
   898 
   899   struct Node* build_list() {
   900     struct Node** pool;
   901     int i, n = read_number_of_nodes_needed();
   902     if (n <= 0) return 0;
   903     pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
   904     if (pool == 0) die();
   905     // organize into a linked list...
   906     struct Node* first = pool[0];
   907     for (i = 0; i < n-1; ++i)
   908       pool[i]->next = pool[i+1];
   909     free(pool);     // Can now free the array (or not, if it is needed later)
   910     return first;
   911   }
   912 */
   913 void** dlindependent_calloc(size_t, size_t, void**);
   914 
   915 /*
   916   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   917 
   918   independent_comalloc allocates, all at once, a set of n_elements
   919   chunks with sizes indicated in the "sizes" array.    It returns
   920   an array of pointers to these elements, each of which can be
   921   independently freed, realloc'ed etc. The elements are guaranteed to
   922   be adjacently allocated (this is not guaranteed to occur with
   923   multiple callocs or mallocs), which may also improve cache locality
   924   in some applications.
   925 
   926   The "chunks" argument is optional (i.e., may be null). If it is null
   927   the returned array is itself dynamically allocated and should also
   928   be freed when it is no longer needed. Otherwise, the chunks array
   929   must be of at least n_elements in length. It is filled in with the
   930   pointers to the chunks.
   931 
   932   In either case, independent_comalloc returns this pointer array, or
   933   null if the allocation failed.  If n_elements is zero and chunks is
   934   null, it returns a chunk representing an array with zero elements
   935   (which should be freed if not wanted).
   936 
   937   Each element must be individually freed when it is no longer
   938   needed. If you'd like to instead be able to free all at once, you
   939   should instead use a single regular malloc, and assign pointers at
   940   particular offsets in the aggregate space. (In this case though, you
   941   cannot independently free elements.)
   942 
   943   independent_comalloc differs from independent_calloc in that each
   944   element may have a different size, and also that it does not
   945   automatically clear elements.
   946 
   947   independent_comalloc can be used to speed up allocation in cases
   948   where several structs or objects must always be allocated at the
   949   same time.  For example:
   950 
   951   struct Head { ... };
   952   struct Foot { ... };
   953 
   954   void send_message(char* msg) {
   955     int msglen = strlen(msg);
   956     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
   957     void* chunks[3];
   958     if (independent_comalloc(3, sizes, chunks) == 0)
   959       die();
   960     struct Head* head = (struct Head*)(chunks[0]);
   961     char*        body = (char*)(chunks[1]);
   962     struct Foot* foot = (struct Foot*)(chunks[2]);
   963     // ...
   964   }
   965 
   966   In general though, independent_comalloc is worth using only for
   967   larger values of n_elements. For small values, you probably won't
   968   detect enough difference from a series of malloc calls to bother.
   969 
   970   Overuse of independent_comalloc can increase overall memory usage,
   971   since it cannot reuse existing noncontiguous small chunks that
   972   might be available for some of the elements.
   973 */
   974 void** dlindependent_comalloc(size_t, size_t*, void**);
   975 
   976 
   977 /*
   978   pvalloc(size_t n);
   979   Equivalent to valloc(minimum-page-that-holds(n)), that is,
   980   round up n to nearest pagesize.
   981  */
   982 void*  dlpvalloc(size_t);
   983 
   984 /*
   985   malloc_trim(size_t pad);
   986 
   987   If possible, gives memory back to the system (via negative arguments
   988   to sbrk) if there is unused memory at the `high' end of the malloc
   989   pool or in unused MMAP segments. You can call this after freeing
   990   large blocks of memory to potentially reduce the system-level memory
   991   requirements of a program. However, it cannot guarantee to reduce
   992   memory. Under some allocation patterns, some large free blocks of
   993   memory will be locked between two used chunks, so they cannot be
   994   given back to the system.
   995 
   996   The `pad' argument to malloc_trim represents the amount of free
   997   trailing space to leave untrimmed. If this argument is zero, only
   998   the minimum amount of memory to maintain internal data structures
   999   will be left. Non-zero arguments can be supplied to maintain enough
  1000   trailing space to service future expected allocations without having
  1001   to re-obtain memory from the system.
  1002 
  1003   Malloc_trim returns 1 if it actually released any memory, else 0.
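
  A hedged usage sketch (the 64KB pad is arbitrary):

    int released = dlmalloc_trim((size_t)64 * 1024);
    // released is 1 if any memory was handed back to the system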
  1004 */
  1005 int  dlmalloc_trim(size_t);
  1006 
  1007 /*
  1008   malloc_usable_size(void* p);
  1009 
  1010   Returns the number of bytes you can actually use in
  1011   an allocated chunk, which may be more than you requested (although
  1012   often not) due to alignment and minimum size constraints.
  1013   You can use this many bytes without worrying about
  1014   overwriting other allocated objects. This is not a particularly great
  1015   programming practice. malloc_usable_size can be more useful in
  1016   debugging and assertions, for example:
  1017 
  1018   p = malloc(n);
  1019   assert(malloc_usable_size(p) >= 256);
  1020 */
  1021 size_t dlmalloc_usable_size(void*);
  1022 
  1023 /*
  1024   malloc_stats();
  1025   Prints on stderr the amount of space obtained from the system (both
  1026   via sbrk and mmap), the maximum amount (which may be more than
  1027   current if malloc_trim and/or munmap got called), and the current
  1028   number of bytes allocated via malloc (or realloc, etc) but not yet
  1029   freed. Note that this is the number of bytes allocated, not the
  1030   number requested. It will be larger than the number requested
  1031   because of alignment and bookkeeping overhead. Because it includes
  1032   alignment wastage as being in use, this figure may be greater than
  1033   zero even when no user-level chunks are allocated.
  1034 
  1035   The reported current and maximum system memory can be inaccurate if
  1036   a program makes other calls to system memory allocation functions
  1037   (normally sbrk) outside of malloc.
  1038 
  1039   malloc_stats prints only the most commonly interesting statistics.
  1040   More information can be obtained by calling mallinfo.
  1041 */
  1042 void  dlmalloc_stats(void);
  1043 
  1044 #endif /* ONLY_MSPACES */
  1045 
  1046 #if MSPACES
  1047 
  1048 /*
  1049   mspace is an opaque type representing an independent
  1050   region of space that supports mspace_malloc, etc.
  1051 */
  1052 typedef void* mspace;
  1053 
  1054 /*
  1055   create_mspace creates and returns a new independent space with the
  1056   given initial capacity, or, if 0, the default granularity size.  It
  1057   returns null if there is no system memory available to create the
  1058   space.  If argument locked is non-zero, the space uses a separate
  1059   lock to control access. The capacity of the space will grow
  1060   dynamically as needed to service mspace_malloc requests.  You can
  1061   control the sizes of incremental increases of this space by
  1062   compiling with a different DEFAULT_GRANULARITY or dynamically
  1063   setting with mallopt(M_GRANULARITY, value).
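
  A minimal lifecycle sketch (capacity 0 and no locking are illustrative):

    mspace ms = create_mspace(0, 0);
    void* p = mspace_malloc(ms, 128);
    mspace_free(ms, p);
    destroy_mspace(ms);    // returns all of the space's memory to the system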
  1064 */
  1065 mspace create_mspace(size_t capacity, int locked);
  1066 
  1067 /*
  1068   destroy_mspace destroys the given space, and attempts to return all
  1069   of its memory back to the system, returning the total number of
  1070   bytes freed. After destruction, the results of access to all memory
  1071   used by the space become undefined.
  1072 */
  1073 size_t destroy_mspace(mspace msp);
  1074 
  1075 /*
  1076   create_mspace_with_base uses the memory supplied as the initial base
  1077   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  1078   space is used for bookkeeping, so the capacity must be at least this
  1079   large. (Otherwise 0 is returned.) When this initial space is
  1080   exhausted, additional memory will be obtained from the system.
  1081   Destroying this space will deallocate all additionally allocated
  1082   space (if possible) but not the initial base.
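
  For example (a hedged sketch; the buffer and its size are arbitrary):

    static char region[256 * 1024];
    mspace ms = create_mspace_with_base(region, sizeof(region), 0);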
  1083 */
  1084 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
  1085 
  1086 /*
  1087   mspace_malloc behaves as malloc, but operates within
  1088   the given space.
  1089 */
  1090 void* mspace_malloc(mspace msp, size_t bytes);
  1091 
  1092 /*
  1093   mspace_free behaves as free, but operates within
  1094   the given space.
  1095 
  1096   If compiled with FOOTERS==1, mspace_free is not actually needed.
  1097   free may be called instead of mspace_free because freed chunks from
  1098   any space are handled by their originating spaces.
  1099 */
  1100 void mspace_free(mspace msp, void* mem);
  1101 
  1102 /*
  1103   mspace_realloc behaves as realloc, but operates within
  1104   the given space.
  1105 
  1106   If compiled with FOOTERS==1, mspace_realloc is not actually
  1107   needed.  realloc may be called instead of mspace_realloc because
  1108   realloced chunks from any space are handled by their originating
  1109   spaces.
  1110 */
  1111 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
  1112 
  1113 /*
  1114   mspace_calloc behaves as calloc, but operates within
  1115   the given space.
  1116 */
  1117 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  1118 
  1119 /*
  1120   mspace_memalign behaves as memalign, but operates within
  1121   the given space.
  1122 */
  1123 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  1124 
  1125 /*
  1126   mspace_independent_calloc behaves as independent_calloc, but
  1127   operates within the given space.
  1128 */
  1129 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  1130                                  size_t elem_size, void* chunks[]);
  1131 
  1132 /*
  1133   mspace_independent_comalloc behaves as independent_comalloc, but
  1134   operates within the given space.
  1135 */
  1136 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  1137                                    size_t sizes[], void* chunks[]);
  1138 
  1139 /*
  1140   mspace_footprint() returns the number of bytes obtained from the
  1141   system for this space.
  1142 */
  1143 size_t mspace_footprint(mspace msp);
  1144 
  1145 /*
  1146   mspace_max_footprint() returns the peak number of bytes obtained from the
  1147   system for this space.
  1148 */
  1149 size_t mspace_max_footprint(mspace msp);
  1150 
  1151 
  1152 #if !NO_MALLINFO
  1153 /*
  1154   mspace_mallinfo behaves as mallinfo, but reports properties of
  1155   the given space.
  1156 */
  1157 struct mallinfo mspace_mallinfo(mspace msp);
  1158 #endif /* NO_MALLINFO */
  1159 
  1160 /*
  1161   mspace_malloc_stats behaves as malloc_stats, but reports
  1162   properties of the given space.
  1163 */
  1164 void mspace_malloc_stats(mspace msp);
  1165 
  1166 /*
  1167   mspace_trim behaves as malloc_trim, but
  1168   operates within the given space.
  1169 */
  1170 int mspace_trim(mspace msp, size_t pad);
  1171 
  1172 /*
  1173   An alias for mallopt.
  1174 */
  1175 int mspace_mallopt(int, int);
  1176 
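       /*
         A rough usage sketch of the mspace API above (the buffer size,
         function name, and error handling here are arbitrary, illustrative
         choices): carve a private heap out of a caller-supplied buffer,
         allocate from it, then tear it down.

           static char arena[64 * 1024];

           void example_mspace_use(void) {
             mspace msp = create_mspace_with_base(arena, sizeof(arena), 0);
             if (msp != 0) {
               void* p = mspace_malloc(msp, 100);
               if (p != 0)
                 mspace_free(msp, p);
               destroy_mspace(msp);  // frees extra system memory, not arena
             }
           }
       */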
  1177 #endif /* MSPACES */
  1178 
  1179 #ifdef __cplusplus
  1180 };  /* end of extern "C" */
  1181 #endif /* __cplusplus */
  1182 
  1183 /*
  1184   ========================================================================
  1185   To make a fully customizable malloc.h header file, cut everything
  1186   above this line, put into file malloc.h, edit to suit, and #include it
  1187   on the next line, as well as in programs that use this malloc.
  1188   ========================================================================
  1189 */
  1190 
  1191 /* #include "malloc.h" */
  1192 
  1193 /*------------------------------ internal #includes ---------------------- */
  1194 
  1195 #ifdef _MSC_VER
  1196 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
  1197 #endif /* _MSC_VER */
  1198 
  1199 #ifndef LACKS_STDIO_H
  1200 #include <stdio.h>       /* for printing in malloc_stats */
  1201 #endif
  1202 
  1203 #ifndef LACKS_ERRNO_H
  1204 #include <errno.h>       /* for MALLOC_FAILURE_ACTION */
  1205 #endif /* LACKS_ERRNO_H */
  1206 #if FOOTERS
  1207 #include <time.h>        /* for magic initialization */
  1208 #endif /* FOOTERS */
  1209 #ifndef LACKS_STDLIB_H
  1210 #include <stdlib.h>      /* for abort() */
  1211 #endif /* LACKS_STDLIB_H */
  1212 #ifdef DEBUG
  1213 #if ABORT_ON_ASSERT_FAILURE
  1214 #define assert(x) if(!(x)) ABORT
  1215 #else /* ABORT_ON_ASSERT_FAILURE */
  1216 #include <assert.h>
  1217 #endif /* ABORT_ON_ASSERT_FAILURE */
  1218 #else  /* DEBUG */
  1219 #define assert(x)
  1220 #endif /* DEBUG */
  1221 #ifndef LACKS_STRING_H
  1222 #include <string.h>      /* for memset etc */
  1223 #endif  /* LACKS_STRING_H */
  1224 #if USE_BUILTIN_FFS
  1225 #ifndef LACKS_STRINGS_H
  1226 #include <strings.h>     /* for ffs */
  1227 #endif /* LACKS_STRINGS_H */
  1228 #endif /* USE_BUILTIN_FFS */
  1229 #if HAVE_MMAP
  1230 #ifndef LACKS_SYS_MMAN_H
  1231 #include <sys/mman.h>    /* for mmap */
  1232 #endif /* LACKS_SYS_MMAN_H */
  1233 #ifndef LACKS_FCNTL_H
  1234 #include <fcntl.h>
  1235 #endif /* LACKS_FCNTL_H */
  1236 #endif /* HAVE_MMAP */
  1237 #if HAVE_MORECORE
  1238 #ifndef LACKS_UNISTD_H
  1239 #include <unistd.h>     /* for sbrk */
  1240 #else /* LACKS_UNISTD_H */
  1241 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  1242 extern void*     sbrk(ptrdiff_t);
  1243 #endif /* FreeBSD etc */
  1244 #endif /* LACKS_UNISTD_H */
  1245 #endif /* HAVE_MORECORE */
  1246 
  1247 #ifndef WIN32
  1248 #ifndef malloc_getpagesize
  1249 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
  1250 #    ifndef _SC_PAGE_SIZE
  1251 #      define _SC_PAGE_SIZE _SC_PAGESIZE
  1252 #    endif
  1253 #  endif
  1254 #  ifdef _SC_PAGE_SIZE
  1255 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  1256 #  else
  1257 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  1258        extern size_t getpagesize();
  1259 #      define malloc_getpagesize getpagesize()
  1260 #    else
  1261 #      ifdef WIN32 /* use supplied emulation of getpagesize */
  1262 #        define malloc_getpagesize getpagesize()
  1263 #      else
  1264 #        ifndef LACKS_SYS_PARAM_H
  1265 #          include <sys/param.h>
  1266 #        endif
  1267 #        ifdef EXEC_PAGESIZE
  1268 #          define malloc_getpagesize EXEC_PAGESIZE
  1269 #        else
  1270 #          ifdef NBPG
  1271 #            ifndef CLSIZE
  1272 #              define malloc_getpagesize NBPG
  1273 #            else
  1274 #              define malloc_getpagesize (NBPG * CLSIZE)
  1275 #            endif
  1276 #          else
  1277 #            ifdef NBPC
  1278 #              define malloc_getpagesize NBPC
  1279 #            else
  1280 #              ifdef PAGESIZE
  1281 #                define malloc_getpagesize PAGESIZE
  1282 #              else /* just guess */
  1283 #                define malloc_getpagesize ((size_t)4096U)
  1284 #              endif
  1285 #            endif
  1286 #          endif
  1287 #        endif
  1288 #      endif
  1289 #    endif
  1290 #  endif
  1291 #endif
  1292 #endif
  1293 
  1294 /* ------------------- size_t and alignment properties -------------------- */
  1295 
  1296 /* The byte and bit size of a size_t */
  1297 #define SIZE_T_SIZE         (sizeof(size_t))
  1298 #define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
  1299 
  1300 /* Some constants coerced to size_t */
  1301 /* Annoying but necessary to avoid errors on some platforms */
  1302 #define SIZE_T_ZERO         ((size_t)0)
  1303 #define SIZE_T_ONE          ((size_t)1)
  1304 #define SIZE_T_TWO          ((size_t)2)
  1305 #define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
  1306 #define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
  1307 #define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
  1308 #define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
  1309 
  1310 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
  1311 #define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
  1312 
  1313 /* True if address a has acceptable alignment */
  1314 #define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
  1315 
  1316 /* the number of bytes to offset an address to align it */
  1317 #define align_offset(A)\
  1318  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  1319   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
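       /*
         For example, assuming the default MALLOC_ALIGNMENT of 8 (so
         CHUNK_ALIGN_MASK is 7):

           align_offset((void*)0x1000) == 0   already 8-byte aligned
           align_offset((void*)0x1003) == 5   0x1003 + 5 == 0x1008
           align_offset((void*)0x1007) == 1   0x1007 + 1 == 0x1008
       */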
  1320 
  1321 /* -------------------------- MMAP preliminaries ------------------------- */
  1322 
  1323 /*
  1324    If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
  1325    checks to fail so compiler optimizer can delete code rather than
  1326    using so many "#if"s.
  1327 */
  1328 
  1329 
  1330 /* MORECORE and MMAP must return MFAIL on failure */
  1331 #define MFAIL                ((void*)(MAX_SIZE_T))
  1332 #define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
  1333 
  1334 #if !HAVE_MMAP
  1335 #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
  1336 #define USE_MMAP_BIT         (SIZE_T_ZERO)
  1337 #define CALL_MMAP(s)         MFAIL
  1338 #define CALL_MUNMAP(a, s)    (-1)
  1339 #define DIRECT_MMAP(s)       MFAIL
  1340 
  1341 #else /* HAVE_MMAP */
  1342 #define IS_MMAPPED_BIT       (SIZE_T_ONE)
  1343 #define USE_MMAP_BIT         (SIZE_T_ONE)
  1344 
  1345 #if !defined(WIN32) && !defined (__OS2__)
  1346 #define CALL_MUNMAP(a, s)    munmap((a), (s))
  1347 #define MMAP_PROT            (PROT_READ|PROT_WRITE)
  1348 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  1349 #define MAP_ANONYMOUS        MAP_ANON
  1350 #endif /* MAP_ANON */
  1351 #ifdef MAP_ANONYMOUS
  1352 #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
  1353 #define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
  1354 #else /* MAP_ANONYMOUS */
  1355 /*
  1356    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  1357    is unlikely to be needed, but is supplied just in case.
  1358 */
  1359 #define MMAP_FLAGS           (MAP_PRIVATE)
  1360 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  1361 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  1362            (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1363             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  1364             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
  1365 #endif /* MAP_ANONYMOUS */
  1366 
  1367 #define DIRECT_MMAP(s)       CALL_MMAP(s)
  1368 
  1369 #elif defined(__OS2__)
  1370 
  1371 /* OS/2 MMAP via DosAllocMem */
  1372 static void* os2mmap(size_t size) {
  1373   void* ptr;
  1374   if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) &&
  1375       DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE))
  1376     return MFAIL;
  1377   return ptr;
  1378 }
  1379 
  1380 #define os2direct_mmap(n)     os2mmap(n)
  1381 
  1382 /* This function supports releasing coalesced segments */
  1383 static int os2munmap(void* ptr, size_t size) {
  1384   while (size) {
  1385     ULONG ulSize = size;
  1386     ULONG ulFlags = 0;
  1387     if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0)
  1388       return -1;
  1389     if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 ||
  1390         ulSize > size)
  1391       return -1;
  1392     if (DosFreeMem(ptr) != 0)
  1393       return -1;
  1394     ptr = ( void * ) ( ( char * ) ptr + ulSize );
  1395     size -= ulSize;
  1396   }
  1397   return 0;
  1398 }
  1399 
  1400 #define CALL_MMAP(s)         os2mmap(s)
  1401 #define CALL_MUNMAP(a, s)    os2munmap((a), (s))
  1402 #define DIRECT_MMAP(s)       os2direct_mmap(s)
  1403 
  1404 #else /* WIN32 */
  1405 
  1406 /* Win32 MMAP via VirtualAlloc */
  1407 static void* win32mmap(size_t size) {
  1408   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  1409   return (ptr != 0)? ptr: MFAIL;
  1410 }
  1411 
  1412 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
  1413 static void* win32direct_mmap(size_t size) {
  1414   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
  1415                            PAGE_READWRITE);
  1416   return (ptr != 0)? ptr: MFAIL;
  1417 }
  1418 
  1419 /* This function supports releasing coalesced segments */
  1420 static int win32munmap(void* ptr, size_t size) {
  1421   MEMORY_BASIC_INFORMATION minfo;
  1422   char* cptr = ptr;
  1423   while (size) {
  1424     if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
  1425       return -1;
  1426     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
  1427         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
  1428       return -1;
  1429     if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
  1430       return -1;
  1431     cptr += minfo.RegionSize;
  1432     size -= minfo.RegionSize;
  1433   }
  1434   return 0;
  1435 }
  1436 
  1437 #define CALL_MMAP(s)         win32mmap(s)
  1438 #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
  1439 #define DIRECT_MMAP(s)       win32direct_mmap(s)
  1440 #endif /* WIN32 */
  1441 #endif /* HAVE_MMAP */
  1442 
  1443 #if HAVE_MMAP && HAVE_MREMAP
  1444 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
  1445 #else  /* HAVE_MMAP && HAVE_MREMAP */
  1446 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
  1447 #endif /* HAVE_MMAP && HAVE_MREMAP */
  1448 
  1449 #if HAVE_MORECORE
  1450 #define CALL_MORECORE(S)     MORECORE(S)
  1451 #else  /* HAVE_MORECORE */
  1452 #define CALL_MORECORE(S)     MFAIL
  1453 #endif /* HAVE_MORECORE */
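       /*
         A minimal sketch of how the system-allocation code further below
         uses these wrappers (the request size is an arbitrary example):
         failure is signalled by MFAIL/CMFAIL rather than a null pointer.

           size_t req  = 64 * 1024;
           char*  base = (char*)(CALL_MMAP(req));
           if (base != CMFAIL) {
             // memory at [base, base + req) is usable here
             CALL_MUNMAP(base, req);
           }
       */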
  1454 
  1455 /* mstate bit set if contiguous morecore disabled or failed */
  1456 #define USE_NONCONTIGUOUS_BIT (4U)
  1457 
  1458 /* segment bit set in create_mspace_with_base */
  1459 #define EXTERN_BIT            (8U)
  1460 
  1461 
  1462 /* --------------------------- Lock preliminaries ------------------------ */
  1463 
  1464 #if USE_LOCKS
  1465 
  1466 /*
  1467   When locks are defined, there are up to two global locks:
  1468 
  1469   * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
  1470     MORECORE.  In many cases sys_alloc requires two calls, that should
  1471     not be interleaved with calls by other threads.  This does not
  1472     protect against direct calls to MORECORE by other threads not
  1473     using this lock, so there is still code to cope as best we can
  1474     with interference.
  1475 
  1476   * magic_init_mutex ensures that mparams.magic and other
  1477     unique mparams values are initialized only once.
  1478 */
  1479 
  1480 #if !defined(WIN32) && !defined(__OS2__)
  1481 /* By default use posix locks */
  1482 #include <pthread.h>
  1483 #define MLOCK_T pthread_mutex_t
  1484 #define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
  1485 #define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
  1486 #define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
  1487 
  1488 #if HAVE_MORECORE
  1489 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
  1490 #endif /* HAVE_MORECORE */
  1491 
  1492 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
  1493 
  1494 #elif defined(__OS2__)
  1495 #define MLOCK_T HMTX
  1496 #define INITIAL_LOCK(l)      DosCreateMutexSem(0, l, 0, FALSE)
  1497 #define ACQUIRE_LOCK(l)      DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT)
  1498 #define RELEASE_LOCK(l)      DosReleaseMutexSem(*l)
  1499 #if HAVE_MORECORE
  1500 static MLOCK_T morecore_mutex;
  1501 #endif /* HAVE_MORECORE */
  1502 static MLOCK_T magic_init_mutex;
  1503 
  1504 #else /* WIN32 */
  1505 /*
  1506    Because lock-protected regions have bounded times, and there
  1507    are no recursive lock calls, we can use simple spinlocks.
  1508 */
  1509 
  1510 #define MLOCK_T long
  1511 static int win32_acquire_lock (MLOCK_T *sl) {
  1512   for (;;) {
  1513 #ifdef InterlockedCompareExchangePointer
  1514     if (!InterlockedCompareExchange(sl, 1, 0))
  1515       return 0;
  1516 #else  /* Use older void* version */
  1517     if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
  1518       return 0;
  1519 #endif /* InterlockedCompareExchangePointer */
  1520     Sleep (0);
  1521   }
  1522 }
  1523 
  1524 static void win32_release_lock (MLOCK_T *sl) {
  1525   InterlockedExchange (sl, 0);
  1526 }
  1527 
  1528 #define INITIAL_LOCK(l)      *(l)=0
  1529 #define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
  1530 #define RELEASE_LOCK(l)      win32_release_lock(l)
  1531 #if HAVE_MORECORE
  1532 static MLOCK_T morecore_mutex;
  1533 #endif /* HAVE_MORECORE */
  1534 static MLOCK_T magic_init_mutex;
  1535 #endif /* WIN32 */
  1536 
  1537 #define USE_LOCK_BIT               (2U)
  1538 #else  /* USE_LOCKS */
  1539 #define USE_LOCK_BIT               (0U)
  1540 #define INITIAL_LOCK(l)
  1541 #endif /* USE_LOCKS */
  1542 
  1543 #if USE_LOCKS && HAVE_MORECORE
  1544 #define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
  1545 #define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
  1546 #else /* USE_LOCKS && HAVE_MORECORE */
  1547 #define ACQUIRE_MORECORE_LOCK()
  1548 #define RELEASE_MORECORE_LOCK()
  1549 #endif /* USE_LOCKS && HAVE_MORECORE */
  1550 
  1551 #if USE_LOCKS
  1552 #define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
  1553 #define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
  1554 #else  /* USE_LOCKS */
  1555 #define ACQUIRE_MAGIC_INIT_LOCK()
  1556 #define RELEASE_MAGIC_INIT_LOCK()
  1557 #endif /* USE_LOCKS */
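       /*
         A sketch of the one-time-initialization pattern the last two macros
         support (init_mparams later in this file does the real work; the
         shape shown here is only illustrative):

           if (mparams.page_size == 0) {    // unsynchronized fast-path test
             ACQUIRE_MAGIC_INIT_LOCK();
             if (mparams.page_size == 0) {
               // initialize mparams exactly once
             }
             RELEASE_MAGIC_INIT_LOCK();
           }
       */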
  1558 
  1559 
  1560 /* -----------------------  Chunk representations ------------------------ */
  1561 
  1562 /*
  1563   (The following includes lightly edited explanations by Colin Plumb.)
  1564 
  1565   The malloc_chunk declaration below is misleading (but accurate and
  1566   necessary).  It declares a "view" into memory allowing access to
  1567   necessary fields at known offsets from a given base.
  1568 
  1569   Chunks of memory are maintained using a `boundary tag' method as
  1570   originally described by Knuth.  (See the paper by Paul Wilson
  1571   ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
  1572   techniques.)  Sizes of free chunks are stored both in the front of
  1573   each chunk and at the end.  This makes consolidating fragmented
  1574   chunks into bigger chunks fast.  The head fields also hold bits
  1575   representing whether chunks are free or in use.
  1576 
  1577   Here are some pictures to make it clearer.  They are "exploded" to
  1578   show that the state of a chunk can be thought of as extending from
  1579   the high 31 bits of the head field of its header through the
  1580   prev_foot and PINUSE_BIT bit of the following chunk header.
  1581 
  1582   A chunk that's in use looks like:
  1583 
  1584    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1585            | Size of previous chunk (if P = 1)                             |
  1586            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1587          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1588          | Size of this chunk                                         1| +-+
  1589    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1590          |                                                               |
  1591          +-                                                             -+
  1592          |                                                               |
  1593          +-                                                             -+
  1594          |                                                               :
  1595          +-      size - sizeof(size_t) available payload bytes          -+
  1596          :                                                               |
  1597  chunk-> +-                                                             -+
  1598          |                                                               |
  1599          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1600        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
  1601        | Size of next chunk (may or may not be in use)               | +-+
  1602  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1603 
  1604     And if it's free, it looks like this:
  1605 
  1606    chunk-> +-                                                             -+
  1607            | User payload (must be in use, or we would have merged!)       |
  1608            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1609          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1610          | Size of this chunk                                         0| +-+
  1611    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1612          | Next pointer                                                  |
  1613          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1614          | Prev pointer                                                  |
  1615          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1616          |                                                               :
  1617          +-      size - sizeof(struct chunk) unused bytes               -+
  1618          :                                                               |
  1619  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1620          | Size of this chunk                                            |
  1621          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1622        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
  1623        | Size of next chunk (must be in use, or we would have merged)| +-+
  1624  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1625        |                                                               :
  1626        +- User payload                                                -+
  1627        :                                                               |
  1628        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1629                                                                      |0|
  1630                                                                      +-+
  1631   Note that since we always merge adjacent free chunks, the chunks
  1632   adjacent to a free chunk must be in use.
  1633 
  1634   Given a pointer to a chunk (which can be derived trivially from the
  1635   payload pointer) we can, in O(1) time, find out whether the adjacent
  1636   chunks are free, and if so, unlink them from the lists that they
  1637   are on and merge them with the current chunk.
  1638 
  1639   Chunks always begin on even word boundaries, so the mem portion
  1640   (which is returned to the user) is also on an even word boundary, and
  1641   thus at least double-word aligned.
  1642 
  1643   The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
  1644   chunk size (which is always a multiple of two words), is an in-use
  1645   bit for the *previous* chunk.  If that bit is *clear*, then the
  1646   word before the current chunk size contains the previous chunk
  1647   size, and can be used to find the front of the previous chunk.
  1648   The very first chunk allocated always has this bit set, preventing
  1649   access to non-existent (or non-owned) memory. If pinuse is set for
  1650   any given chunk, then you CANNOT determine the size of the
  1651   previous chunk, and might even get a memory addressing fault when
  1652   trying to do so.
  1653 
  1654   The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
  1655   the chunk size redundantly records whether the current chunk is
  1656   inuse. This redundancy enables usage checks within free and realloc,
  1657   and reduces indirection when freeing and consolidating chunks.
  1658 
  1659   Each freshly allocated chunk must have both cinuse and pinuse set.
  1660   That is, each allocated chunk borders either a previously allocated
  1661   and still in-use chunk, or the base of its memory arena. This is
  1662   ensured by making all allocations from the `lowest' part of any
  1663   found chunk.  Further, no free chunk physically borders another one,
  1664   so each free chunk is known to be preceded and followed by either
  1665   inuse chunks or the ends of memory.
  1666 
  1667   Note that the `foot' of the current chunk is actually represented
  1668   as the prev_foot of the NEXT chunk. This makes it easier to
  1669   deal with alignments etc but can be very confusing when trying
  1670   to extend or adapt this code.
  1671 
  1672   The exceptions to all this are
  1673 
  1674      1. The special chunk `top' is the top-most available chunk (i.e.,
  1675         the one bordering the end of available memory). It is treated
  1676         specially.  Top is never included in any bin, is used only if
  1677         no other chunk is available, and is released back to the
  1678         system if it is very large (see M_TRIM_THRESHOLD).  In effect,
  1679         the top chunk is treated as larger (and thus less well
  1680         fitting) than any other available chunk.  The top chunk
  1681         doesn't update its trailing size field since there is no next
  1682         contiguous chunk that would have to index off it. However,
  1683         space is still allocated for it (TOP_FOOT_SIZE) to enable
  1684         separation or merging when space is extended.
  1685 
  1686      2. Chunks allocated via mmap, which have the lowest-order bit
  1687         (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
  1688         PINUSE_BIT in their head fields.  Because they are allocated
  1689         one-by-one, each must carry its own prev_foot field, which is
  1690         also used to hold the offset this chunk has within its mmapped
  1691         region, which is needed to preserve alignment. Each mmapped
  1692         chunk is trailed by the first two fields of a fake next-chunk
  1693         for sake of usage checks.
  1694 
  1695 */
  1696 
  1697 struct malloc_chunk {
  1698   size_t               prev_foot;  /* Size of previous chunk (if free).  */
  1699   size_t               head;       /* Size and inuse bits. */
  1700   struct malloc_chunk* fd;         /* double links -- used only if free. */
  1701   struct malloc_chunk* bk;
  1702 };
  1703 
  1704 typedef struct malloc_chunk  mchunk;
  1705 typedef struct malloc_chunk* mchunkptr;
  1706 typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
  1707 typedef size_t bindex_t;               /* Described below */
  1708 typedef unsigned int binmap_t;         /* Described below */
  1709 typedef unsigned int flag_t;           /* The type of various bit flag sets */
  1710 
  1711 /* ------------------- Chunks sizes and alignments ----------------------- */
  1712 
  1713 #define MCHUNK_SIZE         (sizeof(mchunk))
  1714 
  1715 #if FOOTERS
  1716 #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
  1717 #else /* FOOTERS */
  1718 #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
  1719 #endif /* FOOTERS */
  1720 
  1721 /* MMapped chunks need a second word of overhead ... */
  1722 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
  1723 /* ... and additional padding for fake next-chunk at foot */
  1724 #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
  1725 
  1726 /* The smallest size we can malloc is an aligned minimal chunk */
  1727 #define MIN_CHUNK_SIZE\
  1728   ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1729 
  1730 /* conversion from malloc headers to user pointers, and back */
  1731 #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
  1732 #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
  1733 /* chunk associated with aligned address A */
  1734 #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
  1735 
  1736 /* Bounds on request (not chunk) sizes. */
  1737 #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
  1738 #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
  1739 
  1740 /* pad request bytes into a usable size */
  1741 #define pad_request(req) \
  1742    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1743 
  1744 /* pad request, checking for minimum (but not maximum) */
  1745 #define request2size(req) \
  1746   (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
  1747 
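       /*
         Worked example, assuming a 32-bit build with 8-byte alignment and
         FOOTERS disabled (SIZE_T_SIZE == 4, CHUNK_OVERHEAD == 4,
         MIN_CHUNK_SIZE == 16, MIN_REQUEST == 11):

           request2size(5)  == 16   below MIN_REQUEST, bumped to MIN_CHUNK_SIZE
           request2size(13) == 24   (13 + 4 + 7) & ~7
           request2size(24) == 32   (24 + 4 + 7) & ~7
       */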
  1748 
  1749 /* ------------------ Operations on head and foot fields ----------------- */
  1750 
  1751 /*
  1752   The head field of a chunk is or'ed with PINUSE_BIT when the previous
  1753   adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
  1754   in use. If the chunk was obtained with mmap, the prev_foot field has
  1755   IS_MMAPPED_BIT set and also holds the offset of the chunk's base
  1756   within its mmapped region.
  1757 */
  1758 
  1759 #define PINUSE_BIT          (SIZE_T_ONE)
  1760 #define CINUSE_BIT          (SIZE_T_TWO)
  1761 #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
  1762 
  1763 /* Head value for fenceposts */
  1764 #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
  1765 
  1766 /* extraction of fields from head words */
  1767 #define cinuse(p)           ((p)->head & CINUSE_BIT)
  1768 #define pinuse(p)           ((p)->head & PINUSE_BIT)
  1769 #define chunksize(p)        ((p)->head & ~(INUSE_BITS))
  1770 
  1771 #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
  1772 #define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
  1773 
  1774 /* Treat space at ptr +/- offset as a chunk */
  1775 #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
  1776 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
  1777 
  1778 /* Ptr to next or previous physical malloc_chunk. */
  1779 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
  1780 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
  1781 
  1782 /* extract next chunk's pinuse bit */
  1783 #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
  1784 
  1785 /* Get/set size at footer */
  1786 #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
  1787 #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
  1788 
  1789 /* Set size, pinuse bit, and foot */
  1790 #define set_size_and_pinuse_of_free_chunk(p, s)\
  1791   ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
  1792 
  1793 /* Set size, pinuse bit, foot, and clear next pinuse */
  1794 #define set_free_with_pinuse(p, s, n)\
  1795   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
  1796 
  1797 #define is_mmapped(p)\
  1798   (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
  1799 
  1800 /* Get the internal overhead associated with chunk p */
  1801 #define overhead_for(p)\
  1802  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
  1803 
  1804 /* Return true if malloced space is not necessarily cleared */
  1805 #if MMAP_CLEARS
  1806 #define calloc_must_clear(p) (!is_mmapped(p))
  1807 #else /* MMAP_CLEARS */
  1808 #define calloc_must_clear(p) (1)
  1809 #endif /* MMAP_CLEARS */
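       /*
         Putting the macros above together, typical navigation from a user
         pointer looks like the following sketch (mem must denote memory
         returned by this allocator, so p is a valid chunk):

           mchunkptr p = mem2chunk(mem);   // user pointer -> chunk header
           size_t    s = chunksize(p);     // size with flag bits masked off
           mchunkptr n = next_chunk(p);    // physically following chunk
           if (!pinuse(p)) {
             mchunkptr q = prev_chunk(p);  // prev_foot is valid only when
           }                               // the previous chunk is free
       */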
  1810 
  1811 /* ---------------------- Overlaid data structures ----------------------- */
  1812 
  1813 /*
  1814   When chunks are not in use, they are treated as nodes of either
  1815   lists or trees.
  1816 
  1817   "Small"  chunks are stored in circular doubly-linked lists, and look
  1818   like this:
  1819 
  1820     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1821             |             Size of previous chunk                            |
  1822             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1823     `head:' |             Size of chunk, in bytes                         |P|
  1824       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1825             |             Forward pointer to next chunk in list             |
  1826             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1827             |             Back pointer to previous chunk in list            |
  1828             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1829             |             Unused space (may be 0 bytes long)                .
  1830             .                                                               .
  1831             .                                                               |
  1832 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1833     `foot:' |             Size of chunk, in bytes                           |
  1834             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1835 
  1836   Larger chunks are kept in a form of bitwise digital trees (aka
  1837   tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
  1838   free chunks greater than 256 bytes, their size doesn't impose any
  1839   constraints on user chunk sizes.  Each node looks like:
  1840 
  1841     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1842             |             Size of previous chunk                            |
  1843             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1844     `head:' |             Size of chunk, in bytes                         |P|
  1845       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1846             |             Forward pointer to next chunk of same size        |
  1847             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1848             |             Back pointer to previous chunk of same size       |
  1849             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1850             |             Pointer to left child (child[0])                  |
  1851             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1852             |             Pointer to right child (child[1])                 |
  1853             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1854             |             Pointer to parent                                 |
  1855             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1856             |             bin index of this chunk                           |
  1857             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1858             |             Unused space                                      .
  1859             .                                                               |
  1860 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1861     `foot:' |             Size of chunk, in bytes                           |
  1862             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1863 
  1864   Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
  1865   of the same size are arranged in a circularly-linked list, with only
  1866   the oldest chunk (the next to be used, in our FIFO ordering)
  1867   actually in the tree.  (Tree members are distinguished by a non-null
  1868   parent pointer.)  If a chunk with the same size as an existing node
  1869   is inserted, it is linked off the existing node using pointers that
  1870   work in the same way as fd/bk pointers of small chunks.
  1871 
  1872   Each tree contains a power of 2 sized range of chunk sizes (the
  1873   smallest is 0x100 <= x < 0x180), which is divided in half at each
  1874   tree level, with the chunks in the smaller half of the range (0x100
  1875   <= x < 0x140 for the top node) in the left subtree and the larger
  1876   half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
  1877   done by inspecting individual bits.
  1878 
  1879   Using these rules, each node's left subtree contains all smaller
  1880   sizes than its right subtree.  However, the node at the root of each
  1881   subtree has no particular ordering relationship to either.  (The
  1882   dividing line between the subtree sizes is based on trie relation.)
  1883   If we remove the last chunk of a given size from the interior of the
  1884   tree, we need to replace it with a leaf node.  The tree ordering
  1885   rules permit a node to be replaced by any leaf below it.
  1886 
  1887   The smallest chunk in a tree (a common operation in a best-fit
  1888   allocator) can be found by walking a path to the leftmost leaf in
  1889   the tree.  Unlike a usual binary tree, where we follow left child
  1890   pointers until we reach a null, here we follow the right child
  1891   pointer any time the left one is null, until we reach a leaf with
  1892   both child pointers null. The smallest chunk in the tree will be
  1893   somewhere along that path.
  1894 
  1895   The worst case number of steps to add, find, or remove a node is
  1896   bounded by the number of bits differentiating chunks within
  1897   bins. Under current bin calculations, this ranges from 6 up to 21
  1898   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  1899   is of course much better.
  1900 */
  1901 
  1902 struct malloc_tree_chunk {
  1903   /* The first four fields must be compatible with malloc_chunk */
  1904   size_t                    prev_foot;
  1905   size_t                    head;
  1906   struct malloc_tree_chunk* fd;
  1907   struct malloc_tree_chunk* bk;
  1908 
  1909   struct malloc_tree_chunk* child[2];
  1910   struct malloc_tree_chunk* parent;
  1911   bindex_t                  index;
  1912 };
  1913 
  1914 typedef struct malloc_tree_chunk  tchunk;
  1915 typedef struct malloc_tree_chunk* tchunkptr;
  1916 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
  1917 
  1918 /* A little helper macro for trees */
  1919 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
  1920 
  1921 /* ----------------------------- Segments -------------------------------- */
  1922 
  1923 /*
  1924   Each malloc space may include non-contiguous segments, held in a
  1925   list headed by an embedded malloc_segment record representing the
  1926   top-most space. Segments also include flags holding properties of
  1927   the space. Large chunks that are directly allocated by mmap are not
  1928   included in this list. They are instead independently created and
  1929   destroyed without otherwise keeping track of them.
  1930 
  1931   Segment management mainly comes into play for spaces allocated by
  1932   MMAP.  Any call to MMAP might or might not return memory that is
  1933   adjacent to an existing segment.  MORECORE normally contiguously
  1934   extends the current space, so this space is almost always adjacent,
  1935   which is simpler and faster to deal with. (This is why MORECORE is
  1936   used preferentially to MMAP when both are available -- see
  1937   sys_alloc.)  When allocating using MMAP, we don't use any of the
  1938   hinting mechanisms (inconsistently) supported in various
  1939   implementations of unix mmap, or distinguish reserving from
  1940   committing memory. Instead, we just ask for space, and exploit
  1941   contiguity when we get it.  It is probably possible to do
  1942   better than this on some systems, but no general scheme seems
  1943   to be significantly better.
  1944 
  1945   Management entails a simpler variant of the consolidation scheme
  1946   used for chunks to reduce fragmentation -- new adjacent memory is
  1947   normally prepended or appended to an existing segment. However,
  1948   there are limitations compared to chunk consolidation that mostly
  1949   reflect the fact that segment processing is relatively infrequent
  1950   (occurring only when getting memory from system) and that we
  1951   don't expect to have huge numbers of segments:
  1952 
  1953   * Segments are not indexed, so traversal requires linear scans.  (It
  1954     would be possible to index these, but is not worth the extra
  1955     overhead and complexity for most programs on most platforms.)
  1956   * New segments are only appended to old ones when holding top-most
  1957     memory; if they cannot be prepended to others, they are held in
  1958     different segments.
  1959 
  1960   Except for the top-most segment of an mstate, each segment record
  1961   is kept at the tail of its segment. Segments are added by pushing
  1962   segment records onto the list headed by &mstate.seg for the
  1963   containing mstate.
  1964 
  1965   Segment flags control allocation/merge/deallocation policies:
  1966   * If EXTERN_BIT set, then we did not allocate this segment,
  1967     and so should not try to deallocate or merge with others.
  1968     (This currently holds only for the initial segment passed
  1969     into create_mspace_with_base.)
  1970   * If IS_MMAPPED_BIT set, the segment may be merged with
  1971     other surrounding mmapped segments and trimmed/de-allocated
  1972     using munmap.
  1973   * If neither bit is set, then the segment was obtained using
  1974     MORECORE so can be merged with surrounding MORECORE'd segments
  1975     and deallocated/trimmed using MORECORE with negative arguments.
  1976 */
  1977 
  1978 struct malloc_segment {
  1979   char*        base;             /* base address */
  1980   size_t       size;             /* allocated size */
  1981   struct malloc_segment* next;   /* ptr to next segment */
  1982   flag_t       sflags;           /* mmap and extern flag */
  1983 };
  1984 
  1985 #define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
  1986 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
  1987 
  1988 typedef struct malloc_segment  msegment;
  1989 typedef struct malloc_segment* msegmentptr;
  1990 
  1991 /* ---------------------------- malloc_state ----------------------------- */
  1992 
  1993 /*
  1994    A malloc_state holds all of the bookkeeping for a space.
  1995    The main fields are:
  1996 
  1997   Top
  1998     The topmost chunk of the currently active segment. Its size is
  1999     cached in topsize.  The actual size of topmost space is
  2000     topsize+TOP_FOOT_SIZE, which includes space reserved for adding
  2001     fenceposts and segment records if necessary when getting more
  2002     space from the system.  The size at which to autotrim top is
  2003     cached from mparams in trim_check, except that it is disabled if
  2004     an autotrim fails.
  2005 
  2006   Designated victim (dv)
  2007     This is the preferred chunk for servicing small requests that
  2008     don't have exact fits.  It is normally the chunk split off most
  2009     recently to service another small request.  Its size is cached in
  2010     dvsize. The link fields of this chunk are not maintained since it
  2011     is not kept in a bin.
  2012 
  2013   SmallBins
  2014     An array of bin headers for free chunks.  These bins hold chunks
  2015     with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
  2016     chunks of all the same size, spaced 8 bytes apart.  To simplify
  2017     use in double-linked lists, each bin header acts as a malloc_chunk
  2018     pointing to the real first node, if it exists (else pointing to
  2019     itself).  This avoids special-casing for headers.  But to avoid
  2020     waste, we allocate only the fd/bk pointers of bins, and then use
  2021     repositioning tricks to treat these as the fields of a chunk.
  2022 
  2023   TreeBins
  2024     Treebins are pointers to the roots of trees holding a range of
  2025     sizes. There are 2 equally spaced treebins for each power of two
  2026     from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
  2027     larger.
  2028 
  2029   Bin maps
  2030     There is one bit map for small bins ("smallmap") and one for
  2031     treebins ("treemap").  Each bin sets its bit when non-empty, and
  2032     clears the bit when empty.  Bit operations are then used to avoid
  2033     bin-by-bin searching -- nearly all "search" is done without ever
  2034     looking at bins that won't be selected.  The bit maps
  2035     conservatively use 32 bits per map word, even on a 64-bit system.
  2036     For a good description of some of the bit-based techniques used
  2037     here, see Henry S. Warren Jr's book "Hacker's Delight" (and
  2038     supplement at http://hackersdelight.org/). Many of these are
  2039     intended to reduce the branchiness of paths through malloc etc, as
  2040     well as to reduce the number of memory locations read or written.
  2041 
  2042   Segments
  2043     A list of segments headed by an embedded malloc_segment record
  2044     representing the initial space.
  2045 
  2046   Address check support
  2047     The least_addr field is the least address ever obtained from
  2048     MORECORE or MMAP. Attempted frees and reallocs of any address less
  2049     than this are trapped (unless INSECURE is defined).
  2050 
  2051   Magic tag
  2052     A cross-check field that should always hold same value as mparams.magic.
  2053 
  2054   Flags
  2055     Bits recording whether to use MMAP, locks, or contiguous MORECORE
  2056 
  2057   Statistics
  2058     Each space keeps track of current and maximum system memory
  2059     obtained via MORECORE or MMAP.
  2060 
  2061   Locking
  2062     If USE_LOCKS is defined, the "mutex" lock is acquired and released
  2063     around every public call using this mspace.
  2064 */
  2065 
  2066 /* Bin types, widths and sizes */
  2067 #define NSMALLBINS        (32U)
  2068 #define NTREEBINS         (32U)
  2069 #define SMALLBIN_SHIFT    (3U)
  2070 #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
  2071 #define TREEBIN_SHIFT     (8U)
  2072 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
  2073 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
  2074 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
  2075 
  2076 struct malloc_state {
  2077   binmap_t   smallmap;
  2078   binmap_t   treemap;
  2079   size_t     dvsize;
  2080   size_t     topsize;
  2081   char*      least_addr;
  2082   mchunkptr  dv;
  2083   mchunkptr  top;
  2084   size_t     trim_check;
  2085   size_t     magic;
  2086   mchunkptr  smallbins[(NSMALLBINS+1)*2];
  2087   tbinptr    treebins[NTREEBINS];
  2088   size_t     footprint;
  2089   size_t     max_footprint;
  2090   flag_t     mflags;
  2091 #if USE_LOCKS
  2092   MLOCK_T    mutex;     /* locate lock among fields that rarely change */
  2093 #endif /* USE_LOCKS */
  2094   msegment   seg;
  2095 };
  2096 
  2097 typedef struct malloc_state*    mstate;
  2098 
  2099 /* ------------- Global malloc_state and malloc_params ------------------- */
  2100 
  2101 /*
  2102   malloc_params holds global properties, including those that can be
  2103   dynamically set using mallopt. There is a single instance, mparams,
  2104   initialized in init_mparams.
  2105 */
  2106 
  2107 struct malloc_params {
  2108   size_t magic;
  2109   size_t page_size;
  2110   size_t granularity;
  2111   size_t mmap_threshold;
  2112   size_t trim_threshold;
  2113   flag_t default_mflags;
  2114 };
  2115 
  2116 static struct malloc_params mparams;
  2117 
  2118 /* The global malloc_state used for all non-"mspace" calls */
  2119 static struct malloc_state _gm_;
  2120 #define gm                 (&_gm_)
  2121 #define is_global(M)       ((M) == &_gm_)
  2122 #define is_initialized(M)  ((M)->top != 0)
  2123 
  2124 /* -------------------------- system alloc setup ------------------------- */
  2125 
  2126 /* Operations on mflags */
  2127 
  2128 #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
  2129 #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
  2130 #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
  2131 
  2132 #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
  2133 #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
  2134 #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
  2135 
  2136 #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
  2137 #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
  2138 
  2139 #define set_lock(M,L)\
  2140  ((M)->mflags = (L)?\
  2141   ((M)->mflags | USE_LOCK_BIT) :\
  2142   ((M)->mflags & ~USE_LOCK_BIT))
  2143 
  2144 /* page-align a size */
  2145 #define page_align(S)\
  2146  (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
  2147 
  2148 /* granularity-align a size */
  2149 #define granularity_align(S)\
  2150   (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
  2151 
  2152 #define is_page_aligned(S)\
  2153    (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
  2154 #define is_granularity_aligned(S)\
  2155    (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
  2156 
  2157 /*  True if segment S holds address A */
  2158 #define segment_holds(S, A)\
  2159   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
  2160 
  2161 /* Return segment holding given address */
  2162 static msegmentptr segment_holding(mstate m, char* addr) {
  2163   msegmentptr sp = &m->seg;
  2164   for (;;) {
  2165     if (addr >= sp->base && addr < sp->base + sp->size)
  2166       return sp;
  2167     if ((sp = sp->next) == 0)
  2168       return 0;
  2169   }
  2170 }
  2171 
  2172 /* Return true if segment contains a segment link */
  2173 static int has_segment_link(mstate m, msegmentptr ss) {
  2174   msegmentptr sp = &m->seg;
  2175   for (;;) {
  2176     if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
  2177       return 1;
  2178     if ((sp = sp->next) == 0)
  2179       return 0;
  2180   }
  2181 }
  2182 
  2183 #ifndef MORECORE_CANNOT_TRIM
  2184 #define should_trim(M,s)  ((s) > (M)->trim_check)
  2185 #else  /* MORECORE_CANNOT_TRIM */
  2186 #define should_trim(M,s)  (0)
  2187 #endif /* MORECORE_CANNOT_TRIM */
  2188 
  2189 /*
  2190   TOP_FOOT_SIZE is padding at the end of a segment, including space
  2191   that may be needed to place segment records and fenceposts when new
  2192   noncontiguous segments are added.
  2193 */
  2194 #define TOP_FOOT_SIZE\
  2195   (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
  2196 
  2197 
  2198 /* -------------------------------  Hooks -------------------------------- */
  2199 
  2200 /*
  2201   PREACTION should be defined to return 0 on success, and nonzero on
  2202   failure. If you are not using locking, you can redefine these to do
  2203   anything you like.
  2204 */
  2205 
  2206 #if USE_LOCKS
  2207 
  2208 /* Ensure locks are initialized */
  2209 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
  2210 
  2211 #define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
  2212 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
  2213 #else /* USE_LOCKS */
  2214 
  2215 #ifndef PREACTION
  2216 #define PREACTION(M) (0)
  2217 #endif  /* PREACTION */
  2218 
  2219 #ifndef POSTACTION
  2220 #define POSTACTION(M)
  2221 #endif  /* POSTACTION */
  2222 
  2223 #endif /* USE_LOCKS */
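       /*
         The public allocation routines below use these hooks in the
         following shape (a sketch of the pattern, not a complete function):

           if (!PREACTION(gm)) {   // acquire the lock (initializing lazily)
             // ... perform the actual allocation work on gm ...
             POSTACTION(gm);       // release the lock if one was taken
           }
       */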
  2224 
  2225 /*
  2226   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  2227   USAGE_ERROR_ACTION is triggered on detected bad frees and
  2228   reallocs. The argument p is an address that might have triggered the
  2229   fault. It is ignored by the two predefined actions, but might be
  2230   useful in custom actions that try to help diagnose errors.
  2231 */
  2232 
  2233 #if PROCEED_ON_ERROR
  2234 
  2235 /* A count of the number of corruption errors causing resets */
  2236 int malloc_corruption_error_count;
  2237 
  2238 /* default corruption action */
  2239 static void reset_on_error(mstate m);
  2240 
  2241 #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
  2242 #define USAGE_ERROR_ACTION(m, p)
  2243 
  2244 #else /* PROCEED_ON_ERROR */
  2245 
  2246 #ifndef CORRUPTION_ERROR_ACTION
  2247 #define CORRUPTION_ERROR_ACTION(m) ABORT
  2248 #endif /* CORRUPTION_ERROR_ACTION */
  2249 
  2250 #ifndef USAGE_ERROR_ACTION
  2251 #define USAGE_ERROR_ACTION(m,p) ABORT
  2252 #endif /* USAGE_ERROR_ACTION */
  2253 
  2254 #endif /* PROCEED_ON_ERROR */
  2255 
  2256 /* -------------------------- Debugging setup ---------------------------- */
  2257 
  2258 #if ! DEBUG
  2259 
  2260 #define check_free_chunk(M,P)
  2261 #define check_inuse_chunk(M,P)
  2262 #define check_malloced_chunk(M,P,N)
  2263 #define check_mmapped_chunk(M,P)
  2264 #define check_malloc_state(M)
  2265 #define check_top_chunk(M,P)
  2266 
  2267 #else /* DEBUG */
  2268 #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
  2269 #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
  2270 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
  2271 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
  2272 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
  2273 #define check_malloc_state(M)       do_check_malloc_state(M)
  2274 
  2275 static void   do_check_any_chunk(mstate m, mchunkptr p);
  2276 static void   do_check_top_chunk(mstate m, mchunkptr p);
  2277 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
  2278 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
  2279 static void   do_check_free_chunk(mstate m, mchunkptr p);
  2280 static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
  2281 static void   do_check_tree(mstate m, tchunkptr t);
  2282 static void   do_check_treebin(mstate m, bindex_t i);
  2283 static void   do_check_smallbin(mstate m, bindex_t i);
  2284 static void   do_check_malloc_state(mstate m);
  2285 static int    bin_find(mstate m, mchunkptr x);
  2286 static size_t traverse_and_check(mstate m);
  2287 #endif /* DEBUG */
  2288 
  2289 /* ---------------------------- Indexing Bins ---------------------------- */
  2290 
  2291 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
  2292 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
  2293 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
  2294 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
  2295 
  2296 /* addressing by index. See above about smallbin repositioning */
  2297 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
  2298 #define treebin_at(M,i)     (&((M)->treebins[i]))
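       /*
         With SMALLBIN_SHIFT == 3, small bins are spaced 8 bytes apart.
         For example:

           is_small(16)        != 0   16 >> 3 == 2, and 2 < NSMALLBINS
           small_index(16)     == 2
           small_index2size(2) == 16
           is_small(256)       == 0   256 >> 3 == 32; such sizes go to treebins
       */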
  2299 
  2300 /* assign tree index for size S to variable I */
  2301 #if defined(__GNUC__) && defined(i386)
  2302 #define compute_tree_index(S, I)\
  2303 {\
  2304   size_t X = S >> TREEBIN_SHIFT;\
  2305   if (X == 0)\
  2306     I = 0;\
  2307   else if (X > 0xFFFF)\
  2308     I = NTREEBINS-1;\
  2309   else {\
  2310     unsigned int K;\
  2311     __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
  2312     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  2313   }\
  2314 }
  2315 #else /* GNUC */
  2316 #define compute_tree_index(S, I)\
  2317 {\
  2318   size_t X = S >> TREEBIN_SHIFT;\
  2319   if (X == 0)\
  2320     I = 0;\
  2321   else if (X > 0xFFFF)\
  2322     I = NTREEBINS-1;\
  2323   else {\
  2324     unsigned int Y = (unsigned int)X;\
  2325     unsigned int N = ((Y - 0x100) >> 16) & 8;\
  2326     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
  2327     N += K;\
  2328     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
  2329     K = 14 - N + ((Y <<= K) >> 15);\
  2330     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  2331   }\
  2332 }
  2333 #endif /* GNUC */
  2334 
  2335 /* Bit representing maximum resolved size in a treebin at i */
  2336 #define bit_for_tree_index(i) \
  2337    (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
  2338 
  2339 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
  2340 #define leftshift_for_tree_index(i) \
  2341    ((i == NTREEBINS-1)? 0 : \
  2342     ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
  2343 
  2344 /* The size of the smallest chunk held in bin with index i */
  2345 #define minsize_for_tree_index(i) \
  2346    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
  2347    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
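       /*
         Worked example: for a chunk of 768 (0x300) bytes,
         X == 768 >> TREEBIN_SHIFT == 3, whose highest set bit is bit 1
         (K == 1), so compute_tree_index yields
         I == (1 << 1) + ((768 >> 8) & 1) == 3.  Consistently,
         minsize_for_tree_index(3) == (1 << 9) | (1 << 8) == 768 and
         minsize_for_tree_index(4) == 1024, so treebin 3 holds sizes in
         [768, 1024).
       */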
  2348 
  2349 
  2350 /* ------------------------ Operations on bin maps ----------------------- */
  2351 
  2352 /* bit corresponding to given index */
  2353 #define idx2bit(i)              ((binmap_t)(1) << (i))
  2354 
  2355 /* Mark/Clear bits with given index */
  2356 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
  2357 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
  2358 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
  2359 
  2360 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
  2361 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
  2362 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
  2363 
  2364 /* index corresponding to given bit */
  2365 
  2366 #if defined(__GNUC__) && defined(i386)
  2367 #define compute_bit2idx(X, I)\
  2368 {\
  2369   unsigned int J;\
  2370   __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  2371   I = (bindex_t)J;\
  2372 }
  2373 
  2374 #else /* GNUC */
  2375 #if  USE_BUILTIN_FFS
  2376 #define compute_bit2idx(X, I) I = ffs(X)-1
  2377 
  2378 #else /* USE_BUILTIN_FFS */
  2379 #define compute_bit2idx(X, I)\
  2380 {\
  2381   unsigned int Y = X - 1;\
  2382   unsigned int K = Y >> (16-4) & 16;\
  2383   unsigned int N = K;        Y >>= K;\
  2384   N += K = Y >> (8-3) &  8;  Y >>= K;\
  2385   N += K = Y >> (4-2) &  4;  Y >>= K;\
  2386   N += K = Y >> (2-1) &  2;  Y >>= K;\
  2387   N += K = Y >> (1-0) &  1;  Y >>= K;\
  2388   I = (bindex_t)(N + Y);\
  2389 }
  2390 #endif /* USE_BUILTIN_FFS */
  2391 #endif /* GNUC */
  2392 
  2393 /* isolate the least set bit of a bitmap */
  2394 #define least_bit(x)         ((x) & -(x))
  2395 
  2396 /* mask with all bits to left of least bit of x on */
  2397 #define left_bits(x)         ((x<<1) | -(x<<1))
  2398 
  2399 /* mask with all bits to left of or equal to least bit of x on */
  2400 #define same_or_left_bits(x) ((x) | -(x))
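       /*
         For example, with a bitmap value of 0x28 (bits 3 and 5 set):

           least_bit(0x28)         == 0x08
           left_bits(0x08)         == 0xFFFFFFF0   bits strictly above bit 3
           same_or_left_bits(0x08) == 0xFFFFFFF8   bit 3 and everything above

         and compute_bit2idx(0x08, I) sets I to 3, the index of that bit.
       */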
  2401 
  2402 
  2403 /* ----------------------- Runtime Check Support ------------------------- */
  2404 
  2405 /*
  2406   For security, the main invariant is that malloc/free/etc never
  2407   writes to a static address other than malloc_state, unless static
  2408   malloc_state itself has been corrupted, which cannot occur via
  2409   malloc (because of these checks). In essence this means that we
  2410   believe all pointers, sizes, maps etc held in malloc_state, but
  2411   check all of those linked or offset from other embedded data
  2412   structures.  These checks are interspersed with main code in a way
  2413   that tends to minimize their run-time cost.
  2414 
  2415   When FOOTERS is defined, in addition to range checking, we also
  2416   verify footer fields of inuse chunks, which can be used to guarantee
  2417   that the mstate controlling malloc/free is intact.  This is a
  2418   streamlined version of the approach described by William Robertson
  2419   et al in "Run-time Detection of Heap-based Overflows" LISA'03
  2420   http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  2421   of an inuse chunk holds the xor of its mstate and a random seed,
  2422   that is checked upon calls to free() and realloc().  This is
  2423   (probabilistically) unguessable from outside the program, but can be
  2424   computed by any code successfully malloc'ing any chunk, so does not
  2425   itself provide protection against code that has already broken
  2426   security through some other means.  Unlike Robertson et al, we
  2427   always dynamically check addresses of all offset chunks (previous,
  2428   next, etc). This turns out to be cheaper than relying on hashes.
  2429 */
  2430 
  2431 #if !INSECURE
  2432 /* Check if address a is no lower than the least address from MORECORE or MMAP */
  2433 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
  2434 /* Check if address of next chunk n is higher than base chunk p */
  2435 #define ok_next(p, n)    ((char*)(p) < (char*)(n))
  2436 /* Check if p has its cinuse bit on */
  2437 #define ok_cinuse(p)     cinuse(p)
  2438 /* Check if p has its pinuse bit on */
  2439 #define ok_pinuse(p)     pinuse(p)
  2440 
  2441 #else /* !INSECURE */
  2442 #define ok_address(M, a) (1)
  2443 #define ok_next(b, n)    (1)
  2444 #define ok_cinuse(p)     (1)
  2445 #define ok_pinuse(p)     (1)
  2446 #endif /* !INSECURE */
  2447 
  2448 #if (FOOTERS && !INSECURE)
  2449 /* Check if (alleged) mstate m has expected magic field */
  2450 #define ok_magic(M)      ((M)->magic == mparams.magic)
  2451 #else  /* (FOOTERS && !INSECURE) */
  2452 #define ok_magic(M)      (1)
  2453 #endif /* (FOOTERS && !INSECURE) */
  2454 
  2455 
  2456 /* In gcc, use __builtin_expect to minimize impact of checks */
  2457 #if !INSECURE
  2458 #if defined(__GNUC__) && __GNUC__ >= 3
  2459 #define RTCHECK(e)  __builtin_expect(e, 1)
  2460 #else /* GNUC */
  2461 #define RTCHECK(e)  (e)
  2462 #endif /* GNUC */
  2463 #else /* !INSECURE */
  2464 #define RTCHECK(e)  (1)
  2465 #endif /* !INSECURE */
  2466 
  2467 /* macros to set up inuse chunks with or without footers */
  2468 
  2469 #if !FOOTERS
  2470 
  2471 #define mark_inuse_foot(M,p,s)
  2472 
  2473 /* Set cinuse bit and pinuse bit of next chunk */
  2474 #define set_inuse(M,p,s)\
  2475   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2476   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2477 
  2478 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
  2479 #define set_inuse_and_pinuse(M,p,s)\
  2480   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2481   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2482 
  2483 /* Set size, cinuse and pinuse bit of this chunk */
  2484 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2485   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
  2486 
  2487 #else /* FOOTERS */
  2488 
  2489 /* Set foot of inuse chunk to be xor of mstate and seed */
  2490 #define mark_inuse_foot(M,p,s)\
  2491   (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
  2492 
  2493 #define get_mstate_for(p)\
  2494   ((mstate)(((mchunkptr)((char*)(p) +\
  2495     (chunksize(p))))->prev_foot ^ mparams.magic))
  2496 
  2497 #define set_inuse(M,p,s)\
  2498   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2499   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  2500   mark_inuse_foot(M,p,s))
  2501 
  2502 #define set_inuse_and_pinuse(M,p,s)\
  2503   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2504   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  2505  mark_inuse_foot(M,p,s))
  2506 
  2507 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2508   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2509   mark_inuse_foot(M, p, s))
  2510 
  2511 #endif /* !FOOTERS */
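
/*
  Taken together, the FOOTERS variants above let the public entry points
  recover and validate the owning mstate from any inuse chunk.  A minimal
  sketch of how a free-style routine would use that check, assuming
  FOOTERS is enabled (illustration only; the real entry points appear
  later in this file):
*/
#if 0
static void footer_check_sketch(void* mem) {
  mchunkptr p = mem2chunk(mem);
  /* get_mstate_for xors the stored footer with mparams.magic; a smashed
     footer therefore yields a bogus mstate whose magic will not match. */
  mstate fm = get_mstate_for(p);
  if (!ok_magic(fm)) {
    USAGE_ERROR_ACTION(fm, p);
    return;
  }
  /* ... otherwise proceed with the normal free path using fm ... */
}
#endif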
  2512 
  2513 /* ---------------------------- setting mparams -------------------------- */
  2514 
  2515 /* Initialize mparams */
  2516 static int init_mparams(void) {
  2517   if (mparams.page_size == 0) {
  2518     size_t s;
  2519 
  2520     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  2521     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  2522 #if MORECORE_CONTIGUOUS
  2523     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
  2524 #else  /* MORECORE_CONTIGUOUS */
  2525     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
  2526 #endif /* MORECORE_CONTIGUOUS */
  2527 
  2528 #if (FOOTERS && !INSECURE)
  2529     {
  2530 #if USE_DEV_RANDOM
  2531       int fd;
  2532       unsigned char buf[sizeof(size_t)];
  2533       /* Try to use /dev/urandom, else fall back on using time */
  2534       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
  2535           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
  2536         s = *((size_t *) buf);
  2537         close(fd);
  2538       }
  2539       else
  2540 #endif /* USE_DEV_RANDOM */
  2541         s = (size_t)(time(0) ^ (size_t)0x55555555U);
  2542 
  2543       s |= (size_t)8U;    /* ensure nonzero */
  2544       s &= ~(size_t)7U;   /* improve chances of fault for bad values */
  2545 
  2546     }
  2547 #else /* (FOOTERS && !INSECURE) */
  2548     s = (size_t)0x58585858U;
  2549 #endif /* (FOOTERS && !INSECURE) */
  2550     ACQUIRE_MAGIC_INIT_LOCK();
  2551     if (mparams.magic == 0) {
  2552       mparams.magic = s;
  2553       /* Set up lock for main malloc area */
  2554       INITIAL_LOCK(&gm->mutex);
  2555       gm->mflags = mparams.default_mflags;
  2556     }
  2557     RELEASE_MAGIC_INIT_LOCK();
  2558 
  2559 #if !defined(WIN32) && !defined(__OS2__)
  2560     mparams.page_size = malloc_getpagesize;
  2561     mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
  2562                            DEFAULT_GRANULARITY : mparams.page_size);
  2563 #elif defined (__OS2__)
  2564     /* if low memory is used, os2munmap() would break
  2565        if the granularity were anything other than 64k */
  2566     mparams.page_size = 4096u;
  2567     mparams.granularity = 65536u;
  2568 #else /* WIN32 */
  2569     {
  2570       SYSTEM_INFO system_info;
  2571       GetSystemInfo(&system_info);
  2572       mparams.page_size = system_info.dwPageSize;
  2573       mparams.granularity = system_info.dwAllocationGranularity;
  2574     }
  2575 #endif /* WIN32 */
  2576 
  2577     /* Sanity-check configuration:
  2578        size_t must be unsigned and as wide as pointer type.
  2579        ints must be at least 4 bytes.
  2580        alignment must be at least 8.
  2581        Alignment, min chunk size, and page size must all be powers of 2.
  2582     */
  2583     if ((sizeof(size_t) != sizeof(char*)) ||
  2584         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
  2585         (sizeof(int) < 4)  ||
  2586         (MALLOC_ALIGNMENT < (size_t)8U) ||
  2587         ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
  2588         ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
  2589         ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
  2590         ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
  2591       ABORT;
  2592   }
  2593   return 0;
  2594 }
  2595 
  2596 /* support for mallopt */
  2597 static int change_mparam(int param_number, int value) {
  2598   size_t val = (size_t)value;
  2599   init_mparams();
  2600   switch(param_number) {
  2601   case M_TRIM_THRESHOLD:
  2602     mparams.trim_threshold = val;
  2603     return 1;
  2604   case M_GRANULARITY:
  2605     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
  2606       mparams.granularity = val;
  2607       return 1;
  2608     }
  2609     else
  2610       return 0;
  2611   case M_MMAP_THRESHOLD:
  2612     mparams.mmap_threshold = val;
  2613     return 1;
  2614   default:
  2615     return 0;
  2616   }
  2617 }
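
/*
  change_mparam backs the mallopt-style entry point defined later in this
  file.  A usage sketch with the parameter names used above (illustration
  only; not compiled):
*/
#if 0
static void mallopt_usage_sketch(void) {
  /* Keep requests below 1MB in the normal heap instead of mmapping them. */
  change_mparam(M_MMAP_THRESHOLD, 1024*1024);
  /* Consider returning memory to the system once 128K of top is unused. */
  change_mparam(M_TRIM_THRESHOLD, 128*1024);
  /* Granularity must be a power of two no smaller than the page size,
     otherwise change_mparam reports failure by returning 0. */
  change_mparam(M_GRANULARITY, 256*1024);
}
#endif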
  2618 
  2619 #if DEBUG
  2620 /* ------------------------- Debugging Support --------------------------- */
  2621 
  2622 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
  2623 static void do_check_any_chunk(mstate m, mchunkptr p) {
  2624   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2625   assert(ok_address(m, p));
  2626 }
  2627 
  2628 /* Check properties of top chunk */
  2629 static void do_check_top_chunk(mstate m, mchunkptr p) {
  2630   msegmentptr sp = segment_holding(m, (char*)p);
  2631   size_t  sz = chunksize(p);
  2632   assert(sp != 0);
  2633   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2634   assert(ok_address(m, p));
  2635   assert(sz == m->topsize);
  2636   assert(sz > 0);
  2637   assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  2638   assert(pinuse(p));
  2639   assert(!next_pinuse(p));
  2640 }
  2641 
  2642 /* Check properties of (inuse) mmapped chunks */
  2643 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  2644   size_t  sz = chunksize(p);
  2645   size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  2646   assert(is_mmapped(p));
  2647   assert(use_mmap(m));
  2648   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2649   assert(ok_address(m, p));
  2650   assert(!is_small(sz));
  2651   assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  2652   assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  2653   assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
  2654 }
  2655 
  2656 /* Check properties of inuse chunks */
  2657 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  2658   do_check_any_chunk(m, p);
  2659   assert(cinuse(p));
  2660   assert(next_pinuse(p));
  2661   /* If not pinuse and not mmapped, previous chunk has OK offset */
  2662   assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  2663   if (is_mmapped(p))
  2664     do_check_mmapped_chunk(m, p);
  2665 }
  2666 
  2667 /* Check properties of free chunks */
  2668 static void do_check_free_chunk(mstate m, mchunkptr p) {
  2669   size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2670   mchunkptr next = chunk_plus_offset(p, sz);
  2671   do_check_any_chunk(m, p);
  2672   assert(!cinuse(p));
  2673   assert(!next_pinuse(p));
  2674   assert (!is_mmapped(p));
  2675   if (p != m->dv && p != m->top) {
  2676     if (sz >= MIN_CHUNK_SIZE) {
  2677       assert((sz & CHUNK_ALIGN_MASK) == 0);
  2678       assert(is_aligned(chunk2mem(p)));
  2679       assert(next->prev_foot == sz);
  2680       assert(pinuse(p));
  2681       assert (next == m->top || cinuse(next));
  2682       assert(p->fd->bk == p);
  2683       assert(p->bk->fd == p);
  2684     }
  2685     else  /* markers are always of size SIZE_T_SIZE */
  2686       assert(sz == SIZE_T_SIZE);
  2687   }
  2688 }
  2689 
  2690 /* Check properties of malloced chunks at the point they are malloced */
  2691 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  2692   if (mem != 0) {
  2693     mchunkptr p = mem2chunk(mem);
  2694     size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2695     do_check_inuse_chunk(m, p);
  2696     assert((sz & CHUNK_ALIGN_MASK) == 0);
  2697     assert(sz >= MIN_CHUNK_SIZE);
  2698     assert(sz >= s);
  2699     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
  2700     assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  2701   }
  2702 }
  2703 
  2704 /* Check a tree and its subtrees.  */
  2705 static void do_check_tree(mstate m, tchunkptr t) {
  2706   tchunkptr head = 0;
  2707   tchunkptr u = t;
  2708   bindex_t tindex = t->index;
  2709   size_t tsize = chunksize(t);
  2710   bindex_t idx;
  2711   compute_tree_index(tsize, idx);
  2712   assert(tindex == idx);
  2713   assert(tsize >= MIN_LARGE_SIZE);
  2714   assert(tsize >= minsize_for_tree_index(idx));
  2715   assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
  2716 
  2717   do { /* traverse through chain of same-sized nodes */
  2718     do_check_any_chunk(m, ((mchunkptr)u));
  2719     assert(u->index == tindex);
  2720     assert(chunksize(u) == tsize);
  2721     assert(!cinuse(u));
  2722     assert(!next_pinuse(u));
  2723     assert(u->fd->bk == u);
  2724     assert(u->bk->fd == u);
  2725     if (u->parent == 0) {
  2726       assert(u->child[0] == 0);
  2727       assert(u->child[1] == 0);
  2728     }
  2729     else {
  2730       assert(head == 0); /* only one node on chain has parent */
  2731       head = u;
  2732       assert(u->parent != u);
  2733       assert (u->parent->child[0] == u ||
  2734               u->parent->child[1] == u ||
  2735               *((tbinptr*)(u->parent)) == u);
  2736       if (u->child[0] != 0) {
  2737         assert(u->child[0]->parent == u);
  2738         assert(u->child[0] != u);
  2739         do_check_tree(m, u->child[0]);
  2740       }
  2741       if (u->child[1] != 0) {
  2742         assert(u->child[1]->parent == u);
  2743         assert(u->child[1] != u);
  2744         do_check_tree(m, u->child[1]);
  2745       }
  2746       if (u->child[0] != 0 && u->child[1] != 0) {
  2747         assert(chunksize(u->child[0]) < chunksize(u->child[1]));
  2748       }
  2749     }
  2750     u = u->fd;
  2751   } while (u != t);
  2752   assert(head != 0);
  2753 }
  2754 
  2755 /*  Check all the chunks in a treebin.  */
  2756 static void do_check_treebin(mstate m, bindex_t i) {
  2757   tbinptr* tb = treebin_at(m, i);
  2758   tchunkptr t = *tb;
  2759   int empty = (m->treemap & (1U << i)) == 0;
  2760   if (t == 0)
  2761     assert(empty);
  2762   if (!empty)
  2763     do_check_tree(m, t);
  2764 }
  2765 
  2766 /*  Check all the chunks in a smallbin.  */
  2767 static void do_check_smallbin(mstate m, bindex_t i) {
  2768   sbinptr b = smallbin_at(m, i);
  2769   mchunkptr p = b->bk;
  2770   unsigned int empty = (m->smallmap & (1U << i)) == 0;
  2771   if (p == b)
  2772     assert(empty);
  2773   if (!empty) {
  2774     for (; p != b; p = p->bk) {
  2775       size_t size = chunksize(p);
  2776       mchunkptr q;
  2777       /* each chunk claims to be free */
  2778       do_check_free_chunk(m, p);
  2779       /* chunk belongs in bin */
  2780       assert(small_index(size) == i);
  2781       assert(p->bk == b || chunksize(p->bk) == chunksize(p));
  2782       /* chunk is followed by an inuse chunk */
  2783       q = next_chunk(p);
  2784       if (q->head != FENCEPOST_HEAD)
  2785         do_check_inuse_chunk(m, q);
  2786     }
  2787   }
  2788 }
  2789 
  2790 /* Find x in a bin. Used in other check functions. */
  2791 static int bin_find(mstate m, mchunkptr x) {
  2792   size_t size = chunksize(x);
  2793   if (is_small(size)) {
  2794     bindex_t sidx = small_index(size);
  2795     sbinptr b = smallbin_at(m, sidx);
  2796     if (smallmap_is_marked(m, sidx)) {
  2797       mchunkptr p = b;
  2798       do {
  2799         if (p == x)
  2800           return 1;
  2801       } while ((p = p->fd) != b);
  2802     }
  2803   }
  2804   else {
  2805     bindex_t tidx;
  2806     compute_tree_index(size, tidx);
  2807     if (treemap_is_marked(m, tidx)) {
  2808       tchunkptr t = *treebin_at(m, tidx);
  2809       size_t sizebits = size << leftshift_for_tree_index(tidx);
  2810       while (t != 0 && chunksize(t) != size) {
  2811         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  2812         sizebits <<= 1;
  2813       }
  2814       if (t != 0) {
  2815         tchunkptr u = t;
  2816         do {
  2817           if (u == (tchunkptr)x)
  2818             return 1;
  2819         } while ((u = u->fd) != t);
  2820       }
  2821     }
  2822   }
  2823   return 0;
  2824 }
  2825 
  2826 /* Traverse each chunk and check it; return total */
  2827 static size_t traverse_and_check(mstate m) {
  2828   size_t sum = 0;
  2829   if (is_initialized(m)) {
  2830     msegmentptr s = &m->seg;
  2831     sum += m->topsize + TOP_FOOT_SIZE;
  2832     while (s != 0) {
  2833       mchunkptr q = align_as_chunk(s->base);
  2834       mchunkptr lastq = 0;
  2835       assert(pinuse(q));
  2836       while (segment_holds(s, q) &&
  2837              q != m->top && q->head != FENCEPOST_HEAD) {
  2838         sum += chunksize(q);
  2839         if (cinuse(q)) {
  2840           assert(!bin_find(m, q));
  2841           do_check_inuse_chunk(m, q);
  2842         }
  2843         else {
  2844           assert(q == m->dv || bin_find(m, q));
  2845           assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
  2846           do_check_free_chunk(m, q);
  2847         }
  2848         lastq = q;
  2849         q = next_chunk(q);
  2850       }
  2851       s = s->next;
  2852     }
  2853   }
  2854   return sum;
  2855 }
  2856 
  2857 /* Check all properties of malloc_state. */
  2858 static void do_check_malloc_state(mstate m) {
  2859   bindex_t i;
  2860   size_t total;
  2861   /* check bins */
  2862   for (i = 0; i < NSMALLBINS; ++i)
  2863     do_check_smallbin(m, i);
  2864   for (i = 0; i < NTREEBINS; ++i)
  2865     do_check_treebin(m, i);
  2866 
  2867   if (m->dvsize != 0) { /* check dv chunk */
  2868     do_check_any_chunk(m, m->dv);
  2869     assert(m->dvsize == chunksize(m->dv));
  2870     assert(m->dvsize >= MIN_CHUNK_SIZE);
  2871     assert(bin_find(m, m->dv) == 0);
  2872   }
  2873 
  2874   if (m->top != 0) {   /* check top chunk */
  2875     do_check_top_chunk(m, m->top);
  2876     assert(m->topsize == chunksize(m->top));
  2877     assert(m->topsize > 0);
  2878     assert(bin_find(m, m->top) == 0);
  2879   }
  2880 
  2881   total = traverse_and_check(m);
  2882   assert(total <= m->footprint);
  2883   assert(m->footprint <= m->max_footprint);
  2884 }
  2885 #endif /* DEBUG */
  2886 
  2887 /* ----------------------------- statistics ------------------------------ */
  2888 
  2889 #if !NO_MALLINFO
  2890 static struct mallinfo internal_mallinfo(mstate m) {
  2891   struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  2892   if (!PREACTION(m)) {
  2893     check_malloc_state(m);
  2894     if (is_initialized(m)) {
  2895       size_t nfree = SIZE_T_ONE; /* top always free */
  2896       size_t mfree = m->topsize + TOP_FOOT_SIZE;
  2897       size_t sum = mfree;
  2898       msegmentptr s = &m->seg;
  2899       while (s != 0) {
  2900         mchunkptr q = align_as_chunk(s->base);
  2901         while (segment_holds(s, q) &&
  2902                q != m->top && q->head != FENCEPOST_HEAD) {
  2903           size_t sz = chunksize(q);
  2904           sum += sz;
  2905           if (!cinuse(q)) {
  2906             mfree += sz;
  2907             ++nfree;
  2908           }
  2909           q = next_chunk(q);
  2910         }
  2911         s = s->next;
  2912       }
  2913 
  2914       nm.arena    = sum;
  2915       nm.ordblks  = nfree;
  2916       nm.hblkhd   = m->footprint - sum;
  2917       nm.usmblks  = m->max_footprint;
  2918       nm.uordblks = m->footprint - mfree;
  2919       nm.fordblks = mfree;
  2920       nm.keepcost = m->topsize;
  2921     }
  2922 
  2923     POSTACTION(m);
  2924   }
  2925   return nm;
  2926 }
  2927 #endif /* !NO_MALLINFO */
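
/*
  A usage sketch for the statistics above, assuming the dlmallinfo wrapper
  this file provides when mallinfo is enabled (illustration only; not
  compiled):
*/
#if 0
static void mallinfo_usage_sketch(void) {
  struct mallinfo mi = dlmallinfo();
  size_t bytes_in_use = (size_t)mi.uordblks;  /* footprint minus free space */
  size_t bytes_free   = (size_t)mi.fordblks;  /* free space, including top */
  size_t high_water   = (size_t)mi.usmblks;   /* max_footprint so far */
  (void)bytes_in_use; (void)bytes_free; (void)high_water;
}
#endif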
  2928 
  2929 static void internal_malloc_stats(mstate m) {
  2930   if (!PREACTION(m)) {
  2931     size_t maxfp = 0;
  2932     size_t fp = 0;
  2933     size_t used = 0;
  2934     check_malloc_state(m);
  2935     if (is_initialized(m)) {
  2936       msegmentptr s = &m->seg;
  2937       maxfp = m->max_footprint;
  2938       fp = m->footprint;
  2939       used = fp - (m->topsize + TOP_FOOT_SIZE);
  2940 
  2941       while (s != 0) {
  2942         mchunkptr q = align_as_chunk(s->base);
  2943         while (segment_holds(s, q) &&
  2944                q != m->top && q->head != FENCEPOST_HEAD) {
  2945           if (!cinuse(q))
  2946             used -= chunksize(q);
  2947           q = next_chunk(q);
  2948         }
  2949         s = s->next;
  2950       }
  2951     }
  2952 
  2953 #ifndef LACKS_STDIO_H
  2954     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
  2955     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
  2956     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
  2957 #endif
  2958 
  2959     POSTACTION(m);
  2960   }
  2961 }
  2962 
  2963 /* ----------------------- Operations on smallbins ----------------------- */
  2964 
  2965 /*
  2966   Various forms of linking and unlinking are defined as macros.  Even
  2967   the ones for trees, which are very long but have very short typical
  2968   paths.  This is ugly but reduces reliance on inlining support of
  2969   compilers.
  2970 */
  2971 
  2972 /* Link a free chunk into a smallbin  */
  2973 #define insert_small_chunk(M, P, S) {\
  2974   bindex_t I  = small_index(S);\
  2975   mchunkptr B = smallbin_at(M, I);\
  2976   mchunkptr F = B;\
  2977   assert(S >= MIN_CHUNK_SIZE);\
  2978   if (!smallmap_is_marked(M, I))\
  2979     mark_smallmap(M, I);\
  2980   else if (RTCHECK(ok_address(M, B->fd)))\
  2981     F = B->fd;\
  2982   else {\
  2983     CORRUPTION_ERROR_ACTION(M);\
  2984   }\
  2985   B->fd = P;\
  2986   F->bk = P;\
  2987   P->fd = F;\
  2988   P->bk = B;\
  2989 }
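
/*
  A readable rendering of insert_small_chunk with the RTCHECK corruption
  guard omitted (a sketch only; not compiled): the bin header acts as the
  sentinel of a circular doubly-linked list and new chunks are spliced in
  at the front.
*/
#if 0
static void smallbin_insert_sketch(mstate m, mchunkptr p, size_t s) {
  bindex_t i  = small_index(s);
  mchunkptr b = smallbin_at(m, i);  /* sentinel for this bin */
  mchunkptr f = b;                  /* chunk that will follow p */
  if (!smallmap_is_marked(m, i))
    mark_smallmap(m, i);            /* bin was empty; p links only to b */
  else
    f = b->fd;                      /* otherwise p goes before the old first */
  b->fd = p;
  f->bk = p;
  p->fd = f;
  p->bk = b;
}
#endif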
  2990 
  2991 /* Unlink a chunk from a smallbin  */
  2992 #define unlink_small_chunk(M, P, S) {\
  2993   mchunkptr F = P->fd;\
  2994   mchunkptr B = P->bk;\
  2995   bindex_t I = small_index(S);\
  2996   assert(P != B);\
  2997   assert(P != F);\
  2998   assert(chunksize(P) == small_index2size(I));\
  2999   if (F == B)\
  3000     clear_smallmap(M, I);\
  3001   else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
  3002                    (B == smallbin_at(M,I) || ok_address(M, B)))) {\
  3003     F->bk = B;\
  3004     B->fd = F;\
  3005   }\
  3006   else {\
  3007     CORRUPTION_ERROR_ACTION(M);\
  3008   }\
  3009 }
  3010 
  3011 /* Unlink the first chunk from a smallbin */
  3012 #define unlink_first_small_chunk(M, B, P, I) {\
  3013   mchunkptr F = P->fd;\
  3014   assert(P != B);\
  3015   assert(P != F);\
  3016   assert(chunksize(P) == small_index2size(I));\
  3017   if (B == F)\
  3018     clear_smallmap(M, I);\
  3019   else if (RTCHECK(ok_address(M, F))) {\
  3020     B->fd = F;\
  3021     F->bk = B;\
  3022   }\
  3023   else {\
  3024     CORRUPTION_ERROR_ACTION(M);\
  3025   }\
  3026 }
  3027 
  3028 /* Replace dv node, binning the old one */
  3029 /* Used only when dvsize known to be small */
  3030 #define replace_dv(M, P, S) {\
  3031   size_t DVS = M->dvsize;\
  3032   if (DVS != 0) {\
  3033     mchunkptr DV = M->dv;\
  3034     assert(is_small(DVS));\
  3035     insert_small_chunk(M, DV, DVS);\
  3036   }\
  3037   M->dvsize = S;\
  3038   M->dv = P;\
  3039 }
  3040 
  3041 /* ------------------------- Operations on trees ------------------------- */
  3042 
  3043 /* Insert chunk into tree */
  3044 #define insert_large_chunk(M, X, S) {\
  3045   tbinptr* H;\
  3046   bindex_t I;\
  3047   compute_tree_index(S, I);\
  3048   H = treebin_at(M, I);\
  3049   X->index = I;\
  3050   X->child[0] = X->child[1] = 0;\
  3051   if (!treemap_is_marked(M, I)) {\
  3052     mark_treemap(M, I);\
  3053     *H = X;\
  3054     X->parent = (tchunkptr)H;\
  3055     X->fd = X->bk = X;\
  3056   }\
  3057   else {\
  3058     tchunkptr T = *H;\
  3059     size_t K = S << leftshift_for_tree_index(I);\
  3060     for (;;) {\
  3061       if (chunksize(T) != S) {\
  3062         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
  3063         K <<= 1;\
  3064         if (*C != 0)\
  3065           T = *C;\
  3066         else if (RTCHECK(ok_address(M, C))) {\
  3067           *C = X;\
  3068           X->parent = T;\
  3069           X->fd = X->bk = X;\
  3070           break;\
  3071         }\
  3072         else {\
  3073           CORRUPTION_ERROR_ACTION(M);\
  3074           break;\
  3075         }\
  3076       }\
  3077       else {\
  3078         tchunkptr F = T->fd;\
  3079         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
  3080           T->fd = F->bk = X;\
  3081           X->fd = F;\
  3082           X->bk = T;\
  3083           X->parent = 0;\
  3084           break;\
  3085         }\
  3086         else {\
  3087           CORRUPTION_ERROR_ACTION(M);\
  3088           break;\
  3089         }\
  3090       }\
  3091     }\
  3092   }\
  3093 }
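
/*
  The loop above walks the bin's bitwise trie by consuming the size's bits
  below the bin's resolved prefix, most significant first: on each level
  the current top bit of K selects child[0] or child[1].  The same descent,
  open-coded as a lookup (a sketch only; not compiled):
*/
#if 0
static tchunkptr tree_lookup_sketch(mstate m, size_t nb) {
  bindex_t idx;
  tchunkptr t;
  size_t sizebits;
  compute_tree_index(nb, idx);
  t = *treebin_at(m, idx);
  sizebits = nb << leftshift_for_tree_index(idx);
  while (t != 0 && chunksize(t) != nb) {
    /* the top bit of sizebits picks the left (0) or right (1) subtree */
    t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
    sizebits <<= 1;
  }
  return t;  /* a chunk of exactly nb bytes, or 0 if this bin has none */
}
#endif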
  3094 
  3095 /*
  3096   Unlink steps:
  3097 
  3098   1. If x is a chained node, unlink it from its same-sized fd/bk links
  3099      and choose its bk node as its replacement.
  3100   2. If x was the last node of its size, but not a leaf node, it must
  3101      be replaced with a leaf node (not merely one with an open left or
  3102      right), to make sure that lefts and rights of descendants
  3103      correspond properly to bit masks.  We use the rightmost descendant
  3104      of x (see the sketch after the macro below); any other leaf would do, but this one is easy to locate and
  3105      tends to counteract removal of leftmosts elsewhere, and so keeps
  3106      paths shorter than minimally guaranteed.  This doesn't loop much
  3107      because on average a node in a tree is near the bottom.
  3108   3. If x is the base of a chain (i.e., has parent links) relink
  3109      x's parent and children to x's replacement (or null if none).
  3110 */
  3111 
  3112 #define unlink_large_chunk(M, X) {\
  3113   tchunkptr XP = X->parent;\
  3114   tchunkptr R;\
  3115   if (X->bk != X) {\
  3116     tchunkptr F = X->fd;\
  3117     R = X->bk;\
  3118     if (RTCHECK(ok_address(M, F))) {\
  3119       F->bk = R;\
  3120       R->fd = F;\
  3121     }\
  3122     else {\
  3123       CORRUPTION_ERROR_ACTION(M);\
  3124     }\
  3125   }\
  3126   else {\
  3127     tchunkptr* RP;\
  3128     if (((R = *(RP = &(X->child[1]))) != 0) ||\
  3129         ((R = *(RP = &(X->child[0]))) != 0)) {\
  3130       tchunkptr* CP;\
  3131       while ((*(CP = &(R->child[1])) != 0) ||\
  3132              (*(CP = &(R->child[0])) != 0)) {\
  3133         R = *(RP = CP);\
  3134       }\
  3135       if (RTCHECK(ok_address(M, RP)))\
  3136         *RP = 0;\
  3137       else {\
  3138         CORRUPTION_ERROR_ACTION(M);\
  3139       }\
  3140     }\
  3141   }\
  3142   if (XP != 0) {\
  3143     tbinptr* H = treebin_at(M, X->index);\
  3144     if (X == *H) {\
  3145       if ((*H = R) == 0) \
  3146         clear_treemap(M, X->index);\
  3147     }\
  3148     else if (RTCHECK(ok_address(M, XP))) {\
  3149       if (XP->child[0] == X) \
  3150         XP->child[0] = R;\
  3151       else \
  3152         XP->child[1] = R;\
  3153     }\
  3154     else\
  3155       CORRUPTION_ERROR_ACTION(M);\
  3156     if (R != 0) {\
  3157       if (RTCHECK(ok_address(M, R))) {\
  3158         tchunkptr C0, C1;\
  3159         R->parent = XP;\
  3160         if ((C0 = X->child[0]) != 0) {\
  3161           if (RTCHECK(ok_address(M, C0))) {\
  3162             R->child[0] = C0;\
  3163             C0->parent = R;\
  3164           }\
  3165           else\
  3166             CORRUPTION_ERROR_ACTION(M);\
  3167         }\
  3168         if ((C1 = X->child[1]) != 0) {\
  3169           if (RTCHECK(ok_address(M, C1))) {\
  3170             R->child[1] = C1;\
  3171             C1->parent = R;\
  3172           }\
  3173           else\
  3174             CORRUPTION_ERROR_ACTION(M);\
  3175         }\
  3176       }\
  3177       else\
  3178         CORRUPTION_ERROR_ACTION(M);\
  3179     }\
  3180   }\
  3181 }
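
/*
  A readable rendering of step 2 in the comment above: when X is the last
  chunk of its size, its replacement is the rightmost descendant, found by
  following child[1] when possible and child[0] otherwise until a leaf is
  reached (a sketch only, without the corruption checks; not compiled):
*/
#if 0
static tchunkptr rightmost_descendant_sketch(tchunkptr x) {
  tchunkptr r = (x->child[1] != 0)? x->child[1] : x->child[0];
  if (r != 0) {
    for (;;) {
      if (r->child[1] != 0)
        r = r->child[1];
      else if (r->child[0] != 0)
        r = r->child[0];
      else
        break;              /* r is now a leaf */
    }
  }
  return r;  /* leaf to splice into x's place, or 0 if x has no children */
}
#endif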
  3182 
  3183 /* Relays to large vs small bin operations */
  3184 
  3185 #define insert_chunk(M, P, S)\
  3186   if (is_small(S)) insert_small_chunk(M, P, S)\
  3187   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
  3188 
  3189 #define unlink_chunk(M, P, S)\
  3190   if (is_small(S)) unlink_small_chunk(M, P, S)\
  3191   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
  3192 
  3193 
  3194 /* Relays to internal calls to malloc/free from realloc, memalign etc */
  3195 
  3196 #if ONLY_MSPACES
  3197 #define internal_malloc(m, b) mspace_malloc(m, b)
  3198 #define internal_free(m, mem) mspace_free(m,mem);
  3199 #else /* ONLY_MSPACES */
  3200 #if MSPACES
  3201 #define internal_malloc(m, b)\
  3202    (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
  3203 #define internal_free(m, mem)\
  3204    if (m == gm) dlfree(mem); else mspace_free(m,mem);
  3205 #else /* MSPACES */
  3206 #define internal_malloc(m, b) dlmalloc(b)
  3207 #define internal_free(m, mem) dlfree(mem)
  3208 #endif /* MSPACES */
  3209 #endif /* ONLY_MSPACES */
  3210 
  3211 /* -----------------------  Direct-mmapping chunks ----------------------- */
  3212 
  3213 /*
  3214   Directly mmapped chunks are set up with an offset to the start of
  3215   the mmapped region stored in the prev_foot field of the chunk. This
  3216   allows reconstruction of the required argument to MUNMAP when freed,
  3217   and also allows adjustment of the returned chunk to meet alignment
  3218   requirements (especially in memalign).  There is also enough space
  3219   allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
  3220   the PINUSE bit so frees can be checked.
  3221 */
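
/*
  The stored offset is what lets a later free() hand the exact original
  region back to MUNMAP.  A sketch of that reconstruction, mirroring the
  arithmetic in mmap_alloc below (illustration only; not compiled):
*/
#if 0
static void munmap_chunk_sketch(mstate m, mchunkptr p) {
  /* prev_foot holds (offset to the start of the mapping) | IS_MMAPPED_BIT */
  size_t offset = p->prev_foot & ~IS_MMAPPED_BIT;
  size_t len    = chunksize(p) + offset + MMAP_FOOT_PAD;
  if (CALL_MUNMAP((char*)p - offset, len) == 0)
    m->footprint -= len;
}
#endif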
  3222 
  3223 /* Malloc using mmap */
  3224 static void* mmap_alloc(mstate m, size_t nb) {
  3225   size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3226   if (mmsize > nb) {     /* Check for wrap around 0 */
  3227     char* mm = (char*)(DIRECT_MMAP(mmsize));
  3228     if (mm != CMFAIL) {
  3229       size_t offset = align_offset(chunk2mem(mm));
  3230       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  3231       mchunkptr p = (mchunkptr)(mm + offset);
  3232       p->prev_foot = offset | IS_MMAPPED_BIT;
  3233       (p)->head = (psize|CINUSE_BIT);
  3234       mark_inuse_foot(m, p, psize);
  3235       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  3236       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
  3237 
  3238       if (mm < m->least_addr)
  3239         m->least_addr = mm;
  3240       if ((m->footprint += mmsize) > m->max_footprint)
  3241         m->max_footprint = m->footprint;
  3242       assert(is_aligned(chunk2mem(p)));
  3243       check_mmapped_chunk(m, p);
  3244       return chunk2mem(p);
  3245     }
  3246   }
  3247   return 0;
  3248 }
  3249 
  3250 /* Realloc using mmap */
  3251 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
  3252   size_t oldsize = chunksize(oldp);
  3253   if (is_small(nb)) /* Can't shrink mmap regions below small size */
  3254     return 0;
  3255   /* Keep old chunk if big enough but not too big */
  3256   if (oldsize >= nb + SIZE_T_SIZE &&
  3257       (oldsize - nb) <= (mparams.granularity << 1))
  3258     return oldp;
  3259   else {
  3260     size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
  3261     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
  3262     size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
  3263                                          CHUNK_ALIGN_MASK);
  3264     char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
  3265                                   oldmmsize, newmmsize, 1);
  3266     if (cp != CMFAIL) {
  3267       mchunkptr newp = (mchunkptr)(cp + offset);
  3268       size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
  3269       newp->head = (psize|CINUSE_BIT);
  3270       mark_inuse_foot(m, newp, psize);
  3271       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
  3272       chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
  3273 
  3274       if (cp < m->least_addr)
  3275         m->least_addr = cp;
  3276       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
  3277         m->max_footprint = m->footprint;
  3278       check_mmapped_chunk(m, newp);
  3279       return newp;
  3280     }
  3281   }
  3282   return 0;
  3283 }
  3284 
  3285 /* -------------------------- mspace management -------------------------- */
  3286 
  3287 /* Initialize top chunk and its size */
  3288 static void init_top(mstate m, mchunkptr p, size_t psize) {
  3289   /* Ensure alignment */
  3290   size_t offset = align_offset(chunk2mem(p));
  3291   p = (mchunkptr)((char*)p + offset);
  3292   psize -= offset;
  3293 
  3294   m->top = p;
  3295   m->topsize = psize;
  3296   p->head = psize | PINUSE_BIT;
  3297   /* set size of fake trailing chunk holding overhead space only once */
  3298   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  3299   m->trim_check = mparams.trim_threshold; /* reset on each update */
  3300 }
  3301 
  3302 /* Initialize bins for a new mstate that is otherwise zeroed out */
  3303 static void init_bins(mstate m) {
  3304   /* Establish circular links for smallbins */
  3305   bindex_t i;
  3306   for (i = 0; i < NSMALLBINS; ++i) {
  3307     sbinptr bin = smallbin_at(m,i);
  3308     bin->fd = bin->bk = bin;
  3309   }
  3310 }
  3311 
  3312 #if PROCEED_ON_ERROR
  3313 
  3314 /* default corruption action */
  3315 static void reset_on_error(mstate m) {
  3316   int i;
  3317   ++malloc_corruption_error_count;
  3318   /* Reinitialize fields to forget about all memory */
  3319   m->smallbins = m->treebins = 0;
  3320   m->dvsize = m->topsize = 0;
  3321   m->seg.base = 0;
  3322   m->seg.size = 0;
  3323   m->seg.next = 0;
  3324   m->top = m->dv = 0;
  3325   for (i = 0; i < NTREEBINS; ++i)
  3326     *treebin_at(m, i) = 0;
  3327   init_bins(m);
  3328 }
  3329 #endif /* PROCEED_ON_ERROR */
  3330 
  3331 /* Allocate a chunk at newbase and consolidate the remainder with the first chunk at the old base. */
  3332 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
  3333                            size_t nb) {
  3334   mchunkptr p = align_as_chunk(newbase);
  3335   mchunkptr oldfirst = align_as_chunk(oldbase);
  3336   size_t psize = (char*)oldfirst - (char*)p;
  3337   mchunkptr q = chunk_plus_offset(p, nb);
  3338   size_t qsize = psize - nb;
  3339   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3340 
  3341   assert((char*)oldfirst > (char*)q);
  3342   assert(pinuse(oldfirst));
  3343   assert(qsize >= MIN_CHUNK_SIZE);
  3344 
  3345   /* consolidate remainder with first chunk of old base */
  3346   if (oldfirst == m->top) {
  3347     size_t tsize = m->topsize += qsize;
  3348     m->top = q;
  3349     q->head = tsize | PINUSE_BIT;
  3350     check_top_chunk(m, q);
  3351   }
  3352   else if (oldfirst == m->dv) {
  3353     size_t dsize = m->dvsize += qsize;
  3354     m->dv = q;
  3355     set_size_and_pinuse_of_free_chunk(q, dsize);
  3356   }
  3357   else {
  3358     if (!cinuse(oldfirst)) {
  3359       size_t nsize = chunksize(oldfirst);
  3360       unlink_chunk(m, oldfirst, nsize);
  3361       oldfirst = chunk_plus_offset(oldfirst, nsize);
  3362       qsize += nsize;
  3363     }
  3364     set_free_with_pinuse(q, qsize, oldfirst);
  3365     insert_chunk(m, q, qsize);
  3366     check_free_chunk(m, q);
  3367   }
  3368 
  3369   check_malloced_chunk(m, chunk2mem(p), nb);
  3370   return chunk2mem(p);
  3371 }
  3372 
  3373 
  3374 /* Add a segment to hold a new noncontiguous region */
  3375 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  3376   /* Determine locations and sizes of segment, fenceposts, old top */
  3377   char* old_top = (char*)m->top;
  3378   msegmentptr oldsp = segment_holding(m, old_top);
  3379   char* old_end = oldsp->base + oldsp->size;
  3380   size_t ssize = pad_request(sizeof(struct malloc_segment));
  3381   char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3382   size_t offset = align_offset(chunk2mem(rawsp));
  3383   char* asp = rawsp + offset;
  3384   char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  3385   mchunkptr sp = (mchunkptr)csp;
  3386   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  3387   mchunkptr tnext = chunk_plus_offset(sp, ssize);
  3388   mchunkptr p = tnext;
  3389   int nfences = 0;
  3390 
  3391   /* reset top to new space */
  3392   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3393 
  3394   /* Set up segment record */
  3395   assert(is_aligned(ss));
  3396   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  3397   *ss = m->seg; /* Push current record */
  3398   m->seg.base = tbase;
  3399   m->seg.size = tsize;
  3400   m->seg.sflags = mmapped;
  3401   m->seg.next = ss;
  3402 
  3403   /* Insert trailing fenceposts */
  3404   for (;;) {
  3405     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  3406     p->head = FENCEPOST_HEAD;
  3407     ++nfences;
  3408     if ((char*)(&(nextp->head)) < old_end)
  3409       p = nextp;
  3410     else
  3411       break;
  3412   }
  3413   assert(nfences >= 2);
  3414 
  3415   /* Insert the rest of old top into a bin as an ordinary free chunk */
  3416   if (csp != old_top) {
  3417     mchunkptr q = (mchunkptr)old_top;
  3418     size_t psize = csp - old_top;
  3419     mchunkptr tn = chunk_plus_offset(q, psize);
  3420     set_free_with_pinuse(q, psize, tn);
  3421     insert_chunk(m, q, psize);
  3422   }
  3423 
  3424   check_top_chunk(m, m->top);
  3425 }
  3426 
  3427 /* -------------------------- System allocation -------------------------- */
  3428 
  3429 /* Get memory from system using MORECORE or MMAP */
  3430 static void* sys_alloc(mstate m, size_t nb) {
  3431   char* tbase = CMFAIL;
  3432   size_t tsize = 0;
  3433   flag_t mmap_flag = 0;
  3434 
  3435   init_mparams();
  3436 
  3437   /* Directly map large chunks */
  3438   if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  3439     void* mem = mmap_alloc(m, nb);
  3440     if (mem != 0)
  3441       return mem;
  3442   }
  3443 
  3444   /*
  3445     Try getting memory in any of three ways (in most-preferred to
  3446     least-preferred order):
  3447     1. A call to MORECORE that can normally contiguously extend memory.
  3448        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
  3449        main space is mmapped or a previous contiguous call failed)
  3450     2. A call to MMAP new space (disabled if not HAVE_MMAP).
  3451        Note that under the default settings, if MORECORE is unable to
  3452        fulfill a request, and HAVE_MMAP is true, then mmap is
  3453        used as a noncontiguous system allocator. This is a useful backup
  3454        strategy for systems with holes in address spaces -- in this case
  3455        sbrk cannot contiguously expand the heap, but mmap may be able to
  3456        find space.
  3457     3. A call to MORECORE that cannot usually contiguously extend memory.
  3458        (disabled if not HAVE_MORECORE)
  3459   */
  3460 
  3461   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  3462     char* br = CMFAIL;
  3463     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
  3464     size_t asize = 0;
  3465     ACQUIRE_MORECORE_LOCK();
  3466 
  3467     if (ss == 0) {  /* First time through or recovery */
  3468       char* base = (char*)CALL_MORECORE(0);
  3469       if (base != CMFAIL) {
  3470         asize = granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE);
  3471         /* Adjust to end on a page boundary */
  3472         if (!is_page_aligned(base))
  3473           asize += (page_align((size_t)base) - (size_t)base);
  3474         /* Can't call MORECORE if size is negative when treated as signed */
  3475         if (asize < HALF_MAX_SIZE_T &&
  3476             (br = (char*)(CALL_MORECORE(asize))) == base) {
  3477           tbase = base;
  3478           tsize = asize;
  3479         }
  3480       }
  3481     }
  3482     else {
  3483       /* Subtract out existing available top space from MORECORE request. */
  3484       asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE);
  3485       /* Use mem here only if it did contiguously extend old space */
  3486       if (asize < HALF_MAX_SIZE_T &&
  3487           (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
  3488         tbase = br;
  3489         tsize = asize;
  3490       }
  3491     }
  3492 
  3493     if (tbase == CMFAIL) {    /* Cope with partial failure */
  3494       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
  3495         if (asize < HALF_MAX_SIZE_T &&
  3496             asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  3497           size_t esize = granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE - asize);
  3498           if (esize < HALF_MAX_SIZE_T) {
  3499             char* end = (char*)CALL_MORECORE(esize);
  3500             if (end != CMFAIL)
  3501               asize += esize;
  3502             else {            /* Can't use; try to release */
  3503               end = (char*)CALL_MORECORE(-asize);
  3504               br = CMFAIL;
  3505             }
  3506           }
  3507         }
  3508       }
  3509       if (br != CMFAIL) {    /* Use the space we did get */
  3510         tbase = br;
  3511         tsize = asize;
  3512       }
  3513       else
  3514         disable_contiguous(m); /* Don't try contiguous path in the future */
  3515     }
  3516 
  3517     RELEASE_MORECORE_LOCK();
  3518   }
  3519 
  3520   if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
  3521     size_t req = nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE;
  3522     size_t rsize = granularity_align(req);
  3523     if (rsize > nb) { /* Fail if wraps around zero */
  3524       char* mp = (char*)(CALL_MMAP(rsize));
  3525       if (mp != CMFAIL) {
  3526         tbase = mp;
  3527         tsize = rsize;
  3528         mmap_flag = IS_MMAPPED_BIT;
  3529       }
  3530     }
  3531   }
  3532 
  3533   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
  3534     size_t asize = granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE);
  3535     if (asize < HALF_MAX_SIZE_T) {
  3536       char* br = CMFAIL;
  3537       char* end = CMFAIL;
  3538       ACQUIRE_MORECORE_LOCK();
  3539       br = (char*)(CALL_MORECORE(asize));
  3540       end = (char*)(CALL_MORECORE(0));
  3541       RELEASE_MORECORE_LOCK();
  3542       if (br != CMFAIL && end != CMFAIL && br < end) {
  3543         size_t ssize = end - br;
  3544         if (ssize > nb + TOP_FOOT_SIZE) {
  3545           tbase = br;
  3546           tsize = ssize;
  3547         }
  3548       }
  3549     }
  3550   }
  3551 
  3552   if (tbase != CMFAIL) {
  3553 
  3554     if ((m->footprint += tsize) > m->max_footprint)
  3555       m->max_footprint = m->footprint;
  3556 
  3557     if (!is_initialized(m)) { /* first-time initialization */
  3558       m->seg.base = m->least_addr = tbase;
  3559       m->seg.size = tsize;
  3560       m->seg.sflags = mmap_flag;
  3561       m->magic = mparams.magic;
  3562       init_bins(m);
  3563       if (is_global(m)) 
  3564         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3565       else {
  3566         /* Offset top by embedded malloc_state */
  3567         mchunkptr mn = next_chunk(mem2chunk(m));
  3568         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
  3569       }
  3570     }
  3571 
  3572     else {
  3573       /* Try to merge with an existing segment */
  3574       msegmentptr sp = &m->seg;
  3575       while (sp != 0 && tbase != sp->base + sp->size)
  3576         sp = sp->next;
  3577       if (sp != 0 &&
  3578           !is_extern_segment(sp) &&
  3579           (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
  3580           segment_holds(sp, m->top)) { /* append */
  3581         sp->size += tsize;
  3582         init_top(m, m->top, m->topsize + tsize);
  3583       }
  3584       else {
  3585         if (tbase < m->least_addr)
  3586           m->least_addr = tbase;
  3587         sp = &m->seg;
  3588         while (sp != 0 && sp->base != tbase + tsize)
  3589           sp = sp->next;
  3590         if (sp != 0 &&
  3591             !is_extern_segment(sp) &&
  3592             (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
  3593           char* oldbase = sp->base;
  3594           sp->base = tbase;
  3595           sp->size += tsize;
  3596           return prepend_alloc(m, tbase, oldbase, nb);
  3597         }
  3598         else
  3599           add_segment(m, tbase, tsize, mmap_flag);
  3600       }
  3601     }
  3602 
  3603     if (nb < m->topsize) { /* Allocate from new or extended top space */
  3604       size_t rsize = m->topsize -= nb;
  3605       mchunkptr p = m->top;
  3606       mchunkptr r = m->top = chunk_plus_offset(p, nb);
  3607       r->head = rsize | PINUSE_BIT;
  3608       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3609       check_top_chunk(m, m->top);
  3610       check_malloced_chunk(m, chunk2mem(p), nb);
  3611       return chunk2mem(p);
  3612     }
  3613   }
  3614 
  3615   MALLOC_FAILURE_ACTION;
  3616   return 0;
  3617 }
  3618 
  3619 /* -----------------------  system deallocation -------------------------- */
  3620 
  3621 /* Unmap and unlink any mmapped segments that don't contain used chunks */
  3622 static size_t release_unused_segments(mstate m) {
  3623   size_t released = 0;
  3624   msegmentptr pred = &m->seg;
  3625   msegmentptr sp = pred->next;
  3626   while (sp != 0) {
  3627     char* base = sp->base;
  3628     size_t size = sp->size;
  3629     msegmentptr next = sp->next;
  3630     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
  3631       mchunkptr p = align_as_chunk(base);
  3632       size_t psize = chunksize(p);
  3633       /* Can unmap if first chunk holds entire segment and not pinned */
  3634       if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
  3635         tchunkptr tp = (tchunkptr)p;
  3636         assert(segment_holds(sp, (char*)sp));
  3637         if (p == m->dv) {
  3638           m->dv = 0;
  3639           m->dvsize = 0;
  3640         }
  3641         else {
  3642           unlink_large_chunk(m, tp);
  3643         }
  3644         if (CALL_MUNMAP(base, size) == 0) {
  3645           released += size;
  3646           m->footprint -= size;
  3647           /* unlink obsoleted record */
  3648           sp = pred;
  3649           sp->next = next;
  3650         }
  3651         else { /* back out if cannot unmap */
  3652           insert_large_chunk(m, tp, psize);
  3653         }
  3654       }
  3655     }
  3656     pred = sp;
  3657     sp = next;
  3658   }
  3659   return released;
  3660 }
  3661 
  3662 static int sys_trim(mstate m, size_t pad) {
  3663   size_t released = 0;
  3664   if (pad < MAX_REQUEST && is_initialized(m)) {
  3665     pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
  3666 
  3667     if (m->topsize > pad) {
  3668       /* Shrink top space in granularity-size units, keeping at least one */
  3669       size_t unit = mparams.granularity;
  3670       size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
  3671                       SIZE_T_ONE) * unit;
  3672       msegmentptr sp = segment_holding(m, (char*)m->top);
  3673 
  3674       if (!is_extern_segment(sp)) {
  3675         if (is_mmapped_segment(sp)) {
  3676           if (HAVE_MMAP &&
  3677               sp->size >= extra &&
  3678               !has_segment_link(m, sp)) { /* can't shrink if pinned */
  3679             size_t newsize = sp->size - extra;
  3680             /* Prefer mremap, fall back to munmap */
  3681             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
  3682                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
  3683               released = extra;
  3684             }
  3685           }
  3686         }
  3687         else if (HAVE_MORECORE) {
  3688           if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
  3689             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
  3690           ACQUIRE_MORECORE_LOCK();
  3691           {
  3692             /* Make sure end of memory is where we last set it. */
  3693             char* old_br = (char*)(CALL_MORECORE(0));
  3694             if (old_br == sp->base + sp->size) {
  3695               char* rel_br = (char*)(CALL_MORECORE(-extra));
  3696               char* new_br = (char*)(CALL_MORECORE(0));
  3697               if (rel_br != CMFAIL && new_br < old_br)
  3698                 released = old_br - new_br;
  3699             }
  3700           }
  3701           RELEASE_MORECORE_LOCK();
  3702         }
  3703       }
  3704 
  3705       if (released != 0) {
  3706         sp->size -= released;
  3707         m->footprint -= released;
  3708         init_top(m, m->top, m->topsize - released);
  3709         check_top_chunk(m, m->top);
  3710       }
  3711     }
  3712 
  3713     /* Unmap any unused mmapped segments */
  3714     if (HAVE_MMAP) 
  3715       released += release_unused_segments(m);
  3716 
  3717     /* On failure, disable autotrim to avoid repeated failed future calls */
  3718     if (released == 0)
  3719       m->trim_check = MAX_SIZE_T;
  3720   }
  3721 
  3722   return (released != 0)? 1 : 0;
  3723 }
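
/*
  Worked example for the trim computation above (hypothetical sizes;
  a sketch only, not compiled): with the default 64K granularity, a 200K
  top chunk and a pad of a few hundred bytes, extra comes to 3 * 64K, so
  three whole granularity units are returned and about 8K stays in top.
*/
#if 0
static size_t trim_extra_sketch(void) {
  size_t unit    = 64u * 1024u;    /* mparams.granularity */
  size_t topsize = 200u * 1024u;   /* current top size */
  size_t pad     = 512u;           /* caller pad plus TOP_FOOT_SIZE */
  return ((topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
}
#endif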
  3724 
  3725 /* ---------------------------- malloc support --------------------------- */
  3726 
  3727 /* allocate a large request from the best fitting chunk in a treebin */
  3728 static void* tmalloc_large(mstate m, size_t nb) {
  3729   tchunkptr v = 0;
  3730   size_t rsize = -nb; /* Unsigned negation */
  3731   tchunkptr t;
  3732   bindex_t idx;
  3733   compute_tree_index(nb, idx);
  3734 
  3735   if ((t = *treebin_at(m, idx)) != 0) {
  3736     /* Traverse tree for this bin looking for node with size == nb */
  3737     size_t sizebits = nb << leftshift_for_tree_index(idx);
  3738     tchunkptr rst = 0;  /* The deepest untaken right subtree */
  3739     for (;;) {
  3740       tchunkptr rt;
  3741       size_t trem = chunksize(t) - nb;
  3742       if (trem < rsize) {
  3743         v = t;
  3744         if ((rsize = trem) == 0)
  3745           break;
  3746       }
  3747       rt = t->child[1];
  3748       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  3749       if (rt != 0 && rt != t)
  3750         rst = rt;
  3751       if (t == 0) {
  3752         t = rst; /* set t to least subtree holding sizes > nb */
  3753         break;
  3754       }
  3755       sizebits <<= 1;
  3756     }
  3757   }
  3758 
  3759   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
  3760     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
  3761     if (leftbits != 0) {
  3762       bindex_t i;
  3763       binmap_t leastbit = least_bit(leftbits);
  3764       compute_bit2idx(leastbit, i);
  3765       t = *treebin_at(m, i);
  3766     }
  3767   }
  3768 
  3769   while (t != 0) { /* find smallest of tree or subtree */
  3770     size_t trem = chunksize(t) - nb;
  3771     if (trem < rsize) {
  3772       rsize = trem;
  3773       v = t;
  3774     }
  3775     t = leftmost_child(t);
  3776   }
  3777 
  3778   /*  If dv is a better fit, return 0 so malloc will use it */
  3779   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
  3780     if (RTCHECK(ok_address(m, v))) { /* split */
  3781       mchunkptr r = chunk_plus_offset(v, nb);
  3782       assert(chunksize(v) == rsize + nb);
  3783       if (RTCHECK(ok_next(v, r))) {
  3784         unlink_large_chunk(m, v);
  3785         if (rsize < MIN_CHUNK_SIZE)
  3786           set_inuse_and_pinuse(m, v, (rsize + nb));
  3787         else {
  3788           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3789           set_size_and_pinuse_of_free_chunk(r, rsize);
  3790           insert_chunk(m, r, rsize);
  3791         }
  3792         return chunk2mem(v);
  3793       }
  3794     }
  3795     CORRUPTION_ERROR_ACTION(m);
  3796   }
  3797   return 0;
  3798 }
  3799 
  3800 /* allocate a small request from the best fitting chunk in a treebin */
  3801 static void* tmalloc_small(mstate m, size_t nb) {
  3802   tchunkptr t, v;
  3803   size_t rsize;
  3804   bindex_t i;
  3805   binmap_t leastbit = least_bit(m->treemap);
  3806   compute_bit2idx(leastbit, i);
  3807 
  3808   v = t = *treebin_at(m, i);
  3809   rsize = chunksize(t) - nb;
  3810 
  3811   while ((t = leftmost_child(t)) != 0) {
  3812     size_t trem = chunksize(t) - nb;
  3813     if (trem < rsize) {
  3814       rsize = trem;
  3815       v = t;
  3816     }
  3817   }
  3818 
  3819   if (RTCHECK(ok_address(m, v))) {
  3820     mchunkptr r = chunk_plus_offset(v, nb);
  3821     assert(chunksize(v) == rsize + nb);
  3822     if (RTCHECK(ok_next(v, r))) {
  3823       unlink_large_chunk(m, v);
  3824       if (rsize < MIN_CHUNK_SIZE)
  3825         set_inuse_and_pinuse(m, v, (rsize + nb));
  3826       else {
  3827         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3828         set_size_and_pinuse_of_free_chunk(r, rsize);
  3829         replace_dv(m, r, rsize);
  3830       }
  3831       return chunk2mem(v);
  3832     }
  3833   }
  3834 
  3835   CORRUPTION_ERROR_ACTION(m);
  3836   return 0;
  3837 }
  3838 
  3839 /* --------------------------- realloc support --------------------------- */
  3840 
  3841 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
  3842   if (bytes >= MAX_REQUEST) {
  3843     MALLOC_FAILURE_ACTION;
  3844     return 0;
  3845   }
  3846   if (!PREACTION(m)) {
  3847     mchunkptr oldp = mem2chunk(oldmem);
  3848     size_t oldsize = chunksize(oldp);
  3849     mchunkptr next = chunk_plus_offset(oldp, oldsize);
  3850     mchunkptr newp = 0;
  3851     void* extra = 0;
  3852 
  3853     /* Try to either shrink or extend into top. Else malloc-copy-free */
  3854 
  3855     if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
  3856                 ok_next(oldp, next) && ok_pinuse(next))) {
  3857       size_t nb = request2size(bytes);
  3858       if (is_mmapped(oldp))
  3859         newp = mmap_resize(m, oldp, nb);
  3860       else if (oldsize >= nb) { /* already big enough */
  3861         size_t rsize = oldsize - nb;
  3862         newp = oldp;
  3863         if (rsize >= MIN_CHUNK_SIZE) {
  3864           mchunkptr remainder = chunk_plus_offset(newp, nb);
  3865           set_inuse(m, newp, nb);
  3866           set_inuse(m, remainder, rsize);
  3867           extra = chunk2mem(remainder);
  3868         }
  3869       }
  3870       else if (next == m->top && oldsize + m->topsize > nb) {
  3871         /* Expand into top */
  3872         size_t newsize = oldsize + m->topsize;
  3873         size_t newtopsize = newsize - nb;
  3874         mchunkptr newtop = chunk_plus_offset(oldp, nb);
  3875         set_inuse(m, oldp, nb);
  3876         newtop->head = newtopsize |PINUSE_BIT;
  3877         m->top = newtop;
  3878         m->topsize = newtopsize;
  3879         newp = oldp;
  3880       }
  3881     }
  3882     else {
  3883       USAGE_ERROR_ACTION(m, oldmem);
  3884       POSTACTION(m);
  3885       return 0;
  3886     }
  3887 
  3888     POSTACTION(m);
  3889 
  3890     if (newp != 0) {
  3891       if (extra != 0) {
  3892         internal_free(m, extra);
  3893       }
  3894       check_inuse_chunk(m, newp);
  3895       return chunk2mem(newp);
  3896     }
  3897     else {
  3898       void* newmem = internal_malloc(m, bytes);
  3899       if (newmem != 0) {
  3900         size_t oc = oldsize - overhead_for(oldp);
  3901         memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
  3902         internal_free(m, oldmem);
  3903       }
  3904       return newmem;
  3905     }
  3906   }
  3907   return 0;
  3908 }
  3909 
  3910 /* --------------------------- memalign support -------------------------- */
  3911 
  3912 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  3913   if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
  3914     return internal_malloc(m, bytes);
  3915   if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
  3916     alignment = MIN_CHUNK_SIZE;
  3917   if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
  3918     size_t a = MALLOC_ALIGNMENT << 1;
  3919     while (a < alignment) a <<= 1;
  3920     alignment = a;
  3921   }
  3922   
  3923   if (bytes >= MAX_REQUEST - alignment) {
  3924     if (m != 0)  { /* Test isn't needed but avoids compiler warning */
  3925       MALLOC_FAILURE_ACTION;
  3926     }
  3927   }
  3928   else {
  3929     size_t nb = request2size(bytes);
  3930     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
  3931     char* mem = (char*)internal_malloc(m, req);
  3932     if (mem != 0) {
  3933       void* leader = 0;
  3934       void* trailer = 0;
  3935       mchunkptr p = mem2chunk(mem);
  3936 
  3937       if (PREACTION(m)) return 0;
  3938       if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
  3939         /*
  3940           Find an aligned spot inside chunk.  Since we need to give
  3941           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
  3942           the first calculation places us at a spot with less than
  3943           MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
  3944           We've allocated enough total room so that this is always
  3945           possible.
  3946         */
  3947         char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
  3948                                                        alignment -
  3949                                                        SIZE_T_ONE)) &
  3950                                              -alignment));
  3951         char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
  3952           br : br+alignment;
  3953         mchunkptr newp = (mchunkptr)pos;
  3954         size_t leadsize = pos - (char*)(p);
  3955         size_t newsize = chunksize(p) - leadsize;
  3956 
  3957         if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
  3958           newp->prev_foot = p->prev_foot + leadsize;
  3959           newp->head = (newsize|CINUSE_BIT);
  3960         }
  3961         else { /* Otherwise, give back leader, use the rest */
  3962           set_inuse(m, newp, newsize);
  3963           set_inuse(m, p, leadsize);
  3964           leader = chunk2mem(p);
  3965         }
  3966         p = newp;
  3967       }
  3968 
  3969       /* Give back spare room at the end */
  3970       if (!is_mmapped(p)) {
  3971         size_t size = chunksize(p);
  3972         if (size > nb + MIN_CHUNK_SIZE) {
  3973           size_t remainder_size = size - nb;
  3974           mchunkptr remainder = chunk_plus_offset(p, nb);
  3975           set_inuse(m, p, nb);
  3976           set_inuse(m, remainder, remainder_size);
  3977           trailer = chunk2mem(remainder);
  3978         }
  3979       }
  3980 
  3981       assert (chunksize(p) >= nb);
  3982       assert((((size_t)(chunk2mem(p))) % alignment) == 0);
  3983       check_inuse_chunk(m, p);
  3984       POSTACTION(m);
  3985       if (leader != 0) {
  3986         internal_free(m, leader);
  3987       }
  3988       if (trailer != 0) {
  3989         internal_free(m, trailer);
  3990       }
  3991       return chunk2mem(p);
  3992     }
  3993   }
  3994   return 0;
  3995 }
  3996 
  3997 /* ------------------------ comalloc/coalloc support --------------------- */
  3998 
  3999 static void** ialloc(mstate m,
  4000                      size_t n_elements,
  4001                      size_t* sizes,
  4002                      int opts,
  4003                      void* chunks[]) {
  4004   /*
  4005     This provides common support for independent_X routines, handling
  4006     all of the combinations that can result.
  4007 
  4008     The opts arg has:
  4009     bit 0 set if all elements are same size (using sizes[0])
  4010     bit 1 set if elements should be zeroed
  4011   */
  4012 
  4013   size_t    element_size;   /* chunksize of each element, if all same */
  4014   size_t    contents_size;  /* total size of elements */
  4015   size_t    array_size;     /* request size of pointer array */
  4016   void*     mem;            /* malloced aggregate space */
  4017   mchunkptr p;              /* corresponding chunk */
  4018   size_t    remainder_size; /* remaining bytes while splitting */
  4019   void**    marray;         /* either "chunks" or malloced ptr array */
  4020   mchunkptr array_chunk;    /* chunk for malloced ptr array */
  4021   flag_t    was_enabled;    /* to disable mmap */
  4022   size_t    size;
  4023   size_t    i;
  4024 
  4025   /* compute array length, if needed */
  4026   if (chunks != 0) {
  4027     if (n_elements == 0)
  4028       return chunks; /* nothing to do */
  4029     marray = chunks;
  4030     array_size = 0;
  4031   }
  4032   else {
  4033     /* if empty req, must still return chunk representing empty array */
  4034     if (n_elements == 0)
  4035       return (void**)internal_malloc(m, 0);
  4036     marray = 0;
  4037     array_size = request2size(n_elements * (sizeof(void*)));
  4038   }
  4039 
  4040   /* compute total element size */
  4041   if (opts & 0x1) { /* all-same-size */
  4042     element_size = request2size(*sizes);
  4043     contents_size = n_elements * element_size;
  4044   }
  4045   else { /* add up all the sizes */
  4046     element_size = 0;
  4047     contents_size = 0;
  4048     for (i = 0; i != n_elements; ++i)
  4049       contents_size += request2size(sizes[i]);
  4050   }
  4051 
  4052   size = contents_size + array_size;
  4053 
  4054   /*
  4055      Allocate the aggregate chunk.  First disable direct-mmapping so
  4056      malloc won't use it, since we would not be able to later
  4057      free/realloc space internal to a segregated mmap region.
  4058   */
  4059   was_enabled = use_mmap(m);
  4060   disable_mmap(m);
  4061   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  4062   if (was_enabled)
  4063     enable_mmap(m);
  4064   if (mem == 0)
  4065     return 0;
  4066 
  4067   if (PREACTION(m)) return 0;
  4068   p = mem2chunk(mem);
  4069   remainder_size = chunksize(p);
  4070 
  4071   assert(!is_mmapped(p));
  4072 
  4073   if (opts & 0x2) {       /* optionally clear the elements */
  4074     memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  4075   }
  4076 
  4077   /* If not provided, allocate the pointer array as final part of chunk */
  4078   if (marray == 0) {
  4079     size_t  array_chunk_size;
  4080     array_chunk = chunk_plus_offset(p, contents_size);
  4081     array_chunk_size = remainder_size - contents_size;
  4082     marray = (void**) (chunk2mem(array_chunk));
  4083     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
  4084     remainder_size = contents_size;
  4085   }
  4086 
  4087   /* split out elements */
  4088   for (i = 0; ; ++i) {
  4089     marray[i] = chunk2mem(p);
  4090     if (i != n_elements-1) {
  4091       if (element_size != 0)
  4092         size = element_size;
  4093       else
  4094         size = request2size(sizes[i]);
  4095       remainder_size -= size;
  4096       set_size_and_pinuse_of_inuse_chunk(m, p, size);
  4097       p = chunk_plus_offset(p, size);
  4098     }
  4099     else { /* the final element absorbs any overallocation slop */
  4100       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
  4101       break;
  4102     }
  4103   }
  4104 
  4105 #if DEBUG
  4106   if (marray != chunks) {
  4107     /* final element must have exactly exhausted chunk */
  4108     if (element_size != 0) {
  4109       assert(remainder_size == element_size);
  4110     }
  4111     else {
  4112       assert(remainder_size == request2size(sizes[i]));
  4113     }
  4114     check_inuse_chunk(m, mem2chunk(marray));
  4115   }
  4116   for (i = 0; i != n_elements; ++i)
  4117     check_inuse_chunk(m, mem2chunk(marray[i]));
  4118 
  4119 #endif /* DEBUG */
  4120 
  4121   POSTACTION(m);
  4122   return marray;
  4123 }
  4124 
  4125 
  4126 /* -------------------------- public routines ---------------------------- */
  4127 
  4128 #if !ONLY_MSPACES
  4129 
  4130 void* dlmalloc(size_t bytes) {
  4131   /*
  4132      Basic algorithm:
  4133      If a small request (< 256 bytes minus per-chunk overhead):
  4134        1. If one exists, use a remainderless chunk in associated smallbin.
  4135           (Remainderless means that there are too few excess bytes to
  4136           represent as a chunk.)
  4137        2. If it is big enough, use the dv chunk, which is normally the
  4138           chunk adjacent to the one used for the most recent small request.
  4139        3. If one exists, split the smallest available chunk in a bin,
  4140           saving remainder in dv.
  4141        4. If it is big enough, use the top chunk.
  4142        5. If available, get memory from system and use it
  4143      Otherwise, for a large request:
  4144        1. Find the smallest available binned chunk that fits, and use it
  4145           if it is better fitting than dv chunk, splitting if necessary.
  4146        2. If better fitting than any binned chunk, use the dv chunk.
  4147        3. If it is big enough, use the top chunk.
  4148        4. If request size >= mmap threshold, try to directly mmap this chunk.
  4149        5. If available, get memory from system and use it
  4150 
  4151      The ugly goto's here ensure that postaction occurs along all paths.
  4152   */
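
  /*
     A rough worked example of the small-request path (illustrative
     only; the exact numbers depend on compile-time settings -- here
     assuming 4-byte size_t, 8-byte alignment, and no FOOTERS):
     a request for 20 bytes is padded to a 24-byte chunk,
     (20 + 4 + 7) & ~7 == 24, which maps to smallbin index 24 >> 3 == 3.
     If that bin or the next one holds a chunk it is used whole;
     otherwise the remaining steps above are tried in order.
  */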
  4153 
  4154   if (!PREACTION(gm)) {
  4155     void* mem;
  4156     size_t nb;
  4157     if (bytes <= MAX_SMALL_REQUEST) {
  4158       bindex_t idx;
  4159       binmap_t smallbits;
  4160       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4161       idx = small_index(nb);
  4162       smallbits = gm->smallmap >> idx;
  4163 
  4164       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4165         mchunkptr b, p;
  4166         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4167         b = smallbin_at(gm, idx);
  4168         p = b->fd;
  4169         assert(chunksize(p) == small_index2size(idx));
  4170         unlink_first_small_chunk(gm, b, p, idx);
  4171         set_inuse_and_pinuse(gm, p, small_index2size(idx));
  4172         mem = chunk2mem(p);
  4173         check_malloced_chunk(gm, mem, nb);
  4174         goto postaction;
  4175       }
  4176 
  4177       else if (nb > gm->dvsize) {
  4178         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4179           mchunkptr b, p, r;
  4180           size_t rsize;
  4181           bindex_t i;
  4182           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4183           binmap_t leastbit = least_bit(leftbits);
  4184           compute_bit2idx(leastbit, i);
  4185           b = smallbin_at(gm, i);
  4186           p = b->fd;
  4187           assert(chunksize(p) == small_index2size(i));
  4188           unlink_first_small_chunk(gm, b, p, i);
  4189           rsize = small_index2size(i) - nb;
  4190           /* Fit here cannot be remainderless if 4-byte sizes */
  4191           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4192             set_inuse_and_pinuse(gm, p, small_index2size(i));
  4193           else {
  4194             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4195             r = chunk_plus_offset(p, nb);
  4196             set_size_and_pinuse_of_free_chunk(r, rsize);
  4197             replace_dv(gm, r, rsize);
  4198           }
  4199           mem = chunk2mem(p);
  4200           check_malloced_chunk(gm, mem, nb);
  4201           goto postaction;
  4202         }
  4203 
  4204         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
  4205           check_malloced_chunk(gm, mem, nb);
  4206           goto postaction;
  4207         }
  4208       }
  4209     }
  4210     else if (bytes >= MAX_REQUEST)
  4211       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  4212     else {
  4213       nb = pad_request(bytes);
  4214       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  4215         check_malloced_chunk(gm, mem, nb);
  4216         goto postaction;
  4217       }
  4218     }
  4219 
  4220     if (nb <= gm->dvsize) {
  4221       size_t rsize = gm->dvsize - nb;
  4222       mchunkptr p = gm->dv;
  4223       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  4224         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
  4225         gm->dvsize = rsize;
  4226         set_size_and_pinuse_of_free_chunk(r, rsize);
  4227         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4228       }
  4229       else { /* exhaust dv */
  4230         size_t dvs = gm->dvsize;
  4231         gm->dvsize = 0;
  4232         gm->dv = 0;
  4233         set_inuse_and_pinuse(gm, p, dvs);
  4234       }
  4235       mem = chunk2mem(p);
  4236       check_malloced_chunk(gm, mem, nb);
  4237       goto postaction;
  4238     }
  4239 
  4240     else if (nb < gm->topsize) { /* Split top */
  4241       size_t rsize = gm->topsize -= nb;
  4242       mchunkptr p = gm->top;
  4243       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  4244       r->head = rsize | PINUSE_BIT;
  4245       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4246       mem = chunk2mem(p);
  4247       check_top_chunk(gm, gm->top);
  4248       check_malloced_chunk(gm, mem, nb);
  4249       goto postaction;
  4250     }
  4251 
  4252     mem = sys_alloc(gm, nb);
  4253 
  4254   postaction:
  4255     POSTACTION(gm);
  4256     return mem;
  4257   }
  4258 
  4259   return 0;
  4260 }
  4261 
  4262 void dlfree(void* mem) {
  4263   /*
  4264      Consolidate freed chunks with preceding or succeeding bordering
  4265      free chunks, if they exist, and then place in a bin.  Intermixed
  4266      with special cases for top, dv, mmapped chunks, and usage errors.
  4267   */
  4268 
  4269   if (mem != 0) {
  4270     mchunkptr p  = mem2chunk(mem);
  4271 #if FOOTERS
  4272     mstate fm = get_mstate_for(p);
  4273     if (!ok_magic(fm)) {
  4274       USAGE_ERROR_ACTION(fm, p);
  4275       return;
  4276     }
  4277 #else /* FOOTERS */
  4278 #define fm gm
  4279 #endif /* FOOTERS */
  4280     if (!PREACTION(fm)) {
  4281       check_inuse_chunk(fm, p);
  4282       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4283         size_t psize = chunksize(p);
  4284         mchunkptr next = chunk_plus_offset(p, psize);
  4285         if (!pinuse(p)) {
  4286           size_t prevsize = p->prev_foot;
  4287           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4288             prevsize &= ~IS_MMAPPED_BIT;
  4289             psize += prevsize + MMAP_FOOT_PAD;
  4290             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4291               fm->footprint -= psize;
  4292             goto postaction;
  4293           }
  4294           else {
  4295             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4296             psize += prevsize;
  4297             p = prev;
  4298             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4299               if (p != fm->dv) {
  4300                 unlink_chunk(fm, p, prevsize);
  4301               }
  4302               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4303                 fm->dvsize = psize;
  4304                 set_free_with_pinuse(p, psize, next);
  4305                 goto postaction;
  4306               }
  4307             }
  4308             else
  4309               goto erroraction;
  4310           }
  4311         }
  4312 
  4313         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4314           if (!cinuse(next)) {  /* consolidate forward */
  4315             if (next == fm->top) {
  4316               size_t tsize = fm->topsize += psize;
  4317               fm->top = p;
  4318               p->head = tsize | PINUSE_BIT;
  4319               if (p == fm->dv) {
  4320                 fm->dv = 0;
  4321                 fm->dvsize = 0;
  4322               }
  4323               if (should_trim(fm, tsize))
  4324                 sys_trim(fm, 0);
  4325               goto postaction;
  4326             }
  4327             else if (next == fm->dv) {
  4328               size_t dsize = fm->dvsize += psize;
  4329               fm->dv = p;
  4330               set_size_and_pinuse_of_free_chunk(p, dsize);
  4331               goto postaction;
  4332             }
  4333             else {
  4334               size_t nsize = chunksize(next);
  4335               psize += nsize;
  4336               unlink_chunk(fm, next, nsize);
  4337               set_size_and_pinuse_of_free_chunk(p, psize);
  4338               if (p == fm->dv) {
  4339                 fm->dvsize = psize;
  4340                 goto postaction;
  4341               }
  4342             }
  4343           }
  4344           else
  4345             set_free_with_pinuse(p, psize, next);
  4346           insert_chunk(fm, p, psize);
  4347           check_free_chunk(fm, p);
  4348           goto postaction;
  4349         }
  4350       }
  4351     erroraction:
  4352       USAGE_ERROR_ACTION(fm, p);
  4353     postaction:
  4354       POSTACTION(fm);
  4355     }
  4356   }
  4357 #if !FOOTERS
  4358 #undef fm
  4359 #endif /* FOOTERS */
  4360 }
  4361 
  4362 void* dlcalloc(size_t n_elements, size_t elem_size) {
  4363   void* mem;
  4364   size_t req = 0;
  4365   if (n_elements != 0) {
  4366     req = n_elements * elem_size;
  4367     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4368         (req / n_elements != elem_size))
  4369       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4370   }
  4371   mem = dlmalloc(req);
  4372   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4373     memset(mem, 0, req);
  4374   return mem;
  4375 }
  4376 
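/*
  A note on the overflow guard in dlcalloc above: the division is only
  performed when at least one operand is 0x10000 or larger, because two
  factors each below 2^16 cannot overflow even a 32-bit size_t.  For
  example, with 32-bit size_t, n_elements == 0x20000 and
  elem_size == 0x20000 wrap to req == 0, but req / n_elements is then
  0 rather than elem_size, so req is forced to MAX_SIZE_T and the
  allocation fails cleanly instead of returning an undersized block.
*/
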
  4377 void* dlrealloc(void* oldmem, size_t bytes) {
  4378   if (oldmem == 0)
  4379     return dlmalloc(bytes);
  4380 #ifdef REALLOC_ZERO_BYTES_FREES
  4381   if (bytes == 0) {
  4382     dlfree(oldmem);
  4383     return 0;
  4384   }
  4385 #endif /* REALLOC_ZERO_BYTES_FREES */
  4386   else {
  4387 #if ! FOOTERS
  4388     mstate m = gm;
  4389 #else /* FOOTERS */
  4390     mstate m = get_mstate_for(mem2chunk(oldmem));
  4391     if (!ok_magic(m)) {
  4392       USAGE_ERROR_ACTION(m, oldmem);
  4393       return 0;
  4394     }
  4395 #endif /* FOOTERS */
  4396     return internal_realloc(m, oldmem, bytes);
  4397   }
  4398 }
  4399 
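/*
  Usage sketch for dlrealloc (illustration only, not compiled in): it
  follows the usual realloc contract implemented above -- a null old
  pointer behaves like dlmalloc, contents are preserved up to the
  smaller of the old and new sizes, and on failure the original block
  is left intact.

      char* buf = (char*)dlmalloc(16);
      char* bigger = (buf != 0)? (char*)dlrealloc(buf, 64) : 0;
      if (bigger != 0)
        buf = bigger;      // first 16 bytes are unchanged
*/
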
  4400 void* dlmemalign(size_t alignment, size_t bytes) {
  4401   return internal_memalign(gm, alignment, bytes);
  4402 }
  4403 
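/*
  Illustration (not compiled in): dlmemalign returns a block whose
  address is a multiple of the requested alignment; as internal_memalign
  shows, the alignment is first rounded up to a power of two of at
  least MIN_CHUNK_SIZE.

      void* p = dlmemalign(64, 200);
      assert(p == 0 || ((size_t)p % 64) == 0);
      dlfree(p);
*/
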
  4404 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
  4405                                  void* chunks[]) {
  4406   size_t sz = elem_size; /* serves as 1-element array */
  4407   return ialloc(gm, n_elements, &sz, 3, chunks);
  4408 }
  4409 
  4410 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
  4411                                    void* chunks[]) {
  4412   return ialloc(gm, n_elements, sizes, 0, chunks);
  4413 }
  4414 
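/*
  Illustration (not compiled in): the independent_X entry points return
  an array of separately freeable blocks carved from a single chunk.
  dlindependent_calloc passes opts == 3 to ialloc (all elements the
  same size and zeroed); dlindependent_comalloc passes opts == 0
  (per-element sizes, not zeroed).

      // three zeroed 32-byte elements; because the last argument is
      // null, the pointer array itself is also allocated by the call
      void** elems = dlindependent_calloc(3, 32, 0);
      if (elems != 0) {
        dlfree(elems[0]);
        dlfree(elems[1]);
        dlfree(elems[2]);
        dlfree(elems);    // the pointer array is freed separately
      }
*/
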
  4415 void* dlvalloc(size_t bytes) {
  4416   size_t pagesz;
  4417   init_mparams();
  4418   pagesz = mparams.page_size;
  4419   return dlmemalign(pagesz, bytes);
  4420 }
  4421 
  4422 void* dlpvalloc(size_t bytes) {
  4423   size_t pagesz;
  4424   init_mparams();
  4425   pagesz = mparams.page_size;
  4426   return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
  4427 }
  4428 
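/*
  Worked example of the rounding in dlpvalloc above: with a 4096-byte
  page, a request for 5000 bytes becomes
  (5000 + 4096 - 1) & ~(4096 - 1) == 8192, so the size is rounded up to
  a whole number of pages before the page-aligned allocation is made.
*/
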
  4429 int dlmalloc_trim(size_t pad) {
  4430   int result = 0;
  4431   if (!PREACTION(gm)) {
  4432     result = sys_trim(gm, pad);
  4433     POSTACTION(gm);
  4434   }
  4435   return result;
  4436 }
  4437 
  4438 size_t dlmalloc_footprint(void) {
  4439   return gm->footprint;
  4440 }
  4441 
  4442 size_t dlmalloc_max_footprint(void) {
  4443   return gm->max_footprint;
  4444 }
  4445 
  4446 #if !NO_MALLINFO
  4447 struct mallinfo dlmallinfo(void) {
  4448   return internal_mallinfo(gm);
  4449 }
  4450 #endif /* NO_MALLINFO */
  4451 
  4452 void dlmalloc_stats() {
  4453   internal_malloc_stats(gm);
  4454 }
  4455 
  4456 size_t dlmalloc_usable_size(void* mem) {
  4457   if (mem != 0) {
  4458     mchunkptr p = mem2chunk(mem);
  4459     if (cinuse(p))
  4460       return chunksize(p) - overhead_for(p);
  4461   }
  4462   return 0;
  4463 }
  4464 
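/*
  Illustration (not compiled in): the usable size of a live block is at
  least the size that was requested, because requests are padded up to
  the chunk granularity.

      void* p = dlmalloc(100);
      assert(p == 0 || dlmalloc_usable_size(p) >= 100);
      dlfree(p);
*/
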
  4465 int dlmallopt(int param_number, int value) {
  4466   return change_mparam(param_number, value);
  4467 }
  4468 
  4469 #endif /* !ONLY_MSPACES */
  4470 
  4471 /* ----------------------------- user mspaces ---------------------------- */
  4472 
  4473 #if MSPACES
  4474 
  4475 static mstate init_user_mstate(char* tbase, size_t tsize) {
  4476   size_t msize = pad_request(sizeof(struct malloc_state));
  4477   mchunkptr mn;
  4478   mchunkptr msp = align_as_chunk(tbase);
  4479   mstate m = (mstate)(chunk2mem(msp));
  4480   memset(m, 0, msize);
  4481   INITIAL_LOCK(&m->mutex);
  4482   msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
  4483   m->seg.base = m->least_addr = tbase;
  4484   m->seg.size = m->footprint = m->max_footprint = tsize;
  4485   m->magic = mparams.magic;
  4486   m->mflags = mparams.default_mflags;
  4487   disable_contiguous(m);
  4488   init_bins(m);
  4489   mn = next_chunk(mem2chunk(m));
  4490   init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  4491   check_top_chunk(m, m->top);
  4492   return m;
  4493 }
  4494 
  4495 mspace create_mspace(size_t capacity, int locked) {
  4496   mstate m = 0;
  4497   size_t msize = pad_request(sizeof(struct malloc_state));
  4498   init_mparams(); /* Ensure pagesize etc initialized */
  4499 
  4500   if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4501     size_t rs = ((capacity == 0)? mparams.granularity :
  4502                  (capacity + TOP_FOOT_SIZE + msize));
  4503     size_t tsize = granularity_align(rs);
  4504     char* tbase = (char*)(CALL_MMAP(tsize));
  4505     if (tbase != CMFAIL) {
  4506       m = init_user_mstate(tbase, tsize);
  4507       m->seg.sflags = IS_MMAPPED_BIT;
  4508       set_lock(m, locked);
  4509     }
  4510   }
  4511   return (mspace)m;
  4512 }
  4513 
  4514 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  4515   mstate m = 0;
  4516   size_t msize = pad_request(sizeof(struct malloc_state));
  4517   init_mparams(); /* Ensure pagesize etc initialized */
  4518 
  4519   if (capacity > msize + TOP_FOOT_SIZE &&
  4520       capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4521     m = init_user_mstate((char*)base, capacity);
  4522     m->seg.sflags = EXTERN_BIT;
  4523     set_lock(m, locked);
  4524   }
  4525   return (mspace)m;
  4526 }
  4527 
  4528 size_t destroy_mspace(mspace msp) {
  4529   size_t freed = 0;
  4530   mstate ms = (mstate)msp;
  4531   if (ok_magic(ms)) {
  4532     msegmentptr sp = &ms->seg;
  4533     while (sp != 0) {
  4534       char* base = sp->base;
  4535       size_t size = sp->size;
  4536       flag_t flag = sp->sflags;
  4537       sp = sp->next;
  4538       if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
  4539           CALL_MUNMAP(base, size) == 0)
  4540         freed += size;
  4541     }
  4542   }
  4543   else {
  4544     USAGE_ERROR_ACTION(ms,ms);
  4545   }
  4546   return freed;
  4547 }
  4548 
  4549 /*
  4550   mspace versions of routines are near-clones of the global
  4551   versions. This is not so nice but better than the alternatives.
  4552 */
  4553 
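/*
  Typical mspace usage (an illustrative sketch; these entry points
  exist only when MSPACES is enabled): create an independent space,
  allocate from it, then tear the whole space down at once.

      mspace arena = create_mspace(0, 1);   // default capacity, locked
      if (arena != 0) {
        void* a = mspace_malloc(arena, 128);
        void* b = mspace_calloc(arena, 10, sizeof(int));
        mspace_free(arena, a);
        // b need not be freed individually; destroy_mspace releases
        // every segment the space obtained from the system
        destroy_mspace(arena);
      }
*/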
  4554 
  4555 void* mspace_malloc(mspace msp, size_t bytes) {
  4556   mstate ms = (mstate)msp;
  4557   if (!ok_magic(ms)) {
  4558     USAGE_ERROR_ACTION(ms,ms);
  4559     return 0;
  4560   }
  4561   if (!PREACTION(ms)) {
  4562     void* mem;
  4563     size_t nb;
  4564     if (bytes <= MAX_SMALL_REQUEST) {
  4565       bindex_t idx;
  4566       binmap_t smallbits;
  4567       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4568       idx = small_index(nb);
  4569       smallbits = ms->smallmap >> idx;
  4570 
  4571       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4572         mchunkptr b, p;
  4573         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4574         b = smallbin_at(ms, idx);
  4575         p = b->fd;
  4576         assert(chunksize(p) == small_index2size(idx));
  4577         unlink_first_small_chunk(ms, b, p, idx);
  4578         set_inuse_and_pinuse(ms, p, small_index2size(idx));
  4579         mem = chunk2mem(p);
  4580         check_malloced_chunk(ms, mem, nb);
  4581         goto postaction;
  4582       }
  4583 
  4584       else if (nb > ms->dvsize) {
  4585         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4586           mchunkptr b, p, r;
  4587           size_t rsize;
  4588           bindex_t i;
  4589           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4590           binmap_t leastbit = least_bit(leftbits);
  4591           compute_bit2idx(leastbit, i);
  4592           b = smallbin_at(ms, i);
  4593           p = b->fd;
  4594           assert(chunksize(p) == small_index2size(i));
  4595           unlink_first_small_chunk(ms, b, p, i);
  4596           rsize = small_index2size(i) - nb;
  4597           /* Fit here cannot be remainderless if 4-byte sizes */
  4598           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4599             set_inuse_and_pinuse(ms, p, small_index2size(i));
  4600           else {
  4601             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4602             r = chunk_plus_offset(p, nb);
  4603             set_size_and_pinuse_of_free_chunk(r, rsize);
  4604             replace_dv(ms, r, rsize);
  4605           }
  4606           mem = chunk2mem(p);
  4607           check_malloced_chunk(ms, mem, nb);
  4608           goto postaction;
  4609         }
  4610 
  4611         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
  4612           check_malloced_chunk(ms, mem, nb);
  4613           goto postaction;
  4614         }
  4615       }
  4616     }
  4617     else if (bytes >= MAX_REQUEST)
  4618       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  4619     else {
  4620       nb = pad_request(bytes);
  4621       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
  4622         check_malloced_chunk(ms, mem, nb);
  4623         goto postaction;
  4624       }
  4625     }
  4626 
  4627     if (nb <= ms->dvsize) {
  4628       size_t rsize = ms->dvsize - nb;
  4629       mchunkptr p = ms->dv;
  4630       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  4631         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
  4632         ms->dvsize = rsize;
  4633         set_size_and_pinuse_of_free_chunk(r, rsize);
  4634         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4635       }
  4636       else { /* exhaust dv */
  4637         size_t dvs = ms->dvsize;
  4638         ms->dvsize = 0;
  4639         ms->dv = 0;
  4640         set_inuse_and_pinuse(ms, p, dvs);
  4641       }
  4642       mem = chunk2mem(p);
  4643       check_malloced_chunk(ms, mem, nb);
  4644       goto postaction;
  4645     }
  4646 
  4647     else if (nb < ms->topsize) { /* Split top */
  4648       size_t rsize = ms->topsize -= nb;
  4649       mchunkptr p = ms->top;
  4650       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
  4651       r->head = rsize | PINUSE_BIT;
  4652       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4653       mem = chunk2mem(p);
  4654       check_top_chunk(ms, ms->top);
  4655       check_malloced_chunk(ms, mem, nb);
  4656       goto postaction;
  4657     }
  4658 
  4659     mem = sys_alloc(ms, nb);
  4660 
  4661   postaction:
  4662     POSTACTION(ms);
  4663     return mem;
  4664   }
  4665 
  4666   return 0;
  4667 }
  4668 
  4669 void mspace_free(mspace msp, void* mem) {
  4670   if (mem != 0) {
  4671     mchunkptr p  = mem2chunk(mem);
  4672 #if FOOTERS
  4673     mstate fm = get_mstate_for(p);
  4674 #else /* FOOTERS */
  4675     mstate fm = (mstate)msp;
  4676 #endif /* FOOTERS */
  4677     if (!ok_magic(fm)) {
  4678       USAGE_ERROR_ACTION(fm, p);
  4679       return;
  4680     }
  4681     if (!PREACTION(fm)) {
  4682       check_inuse_chunk(fm, p);
  4683       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4684         size_t psize = chunksize(p);
  4685         mchunkptr next = chunk_plus_offset(p, psize);
  4686         if (!pinuse(p)) {
  4687           size_t prevsize = p->prev_foot;
  4688           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4689             prevsize &= ~IS_MMAPPED_BIT;
  4690             psize += prevsize + MMAP_FOOT_PAD;
  4691             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4692               fm->footprint -= psize;
  4693             goto postaction;
  4694           }
  4695           else {
  4696             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4697             psize += prevsize;
  4698             p = prev;
  4699             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4700               if (p != fm->dv) {
  4701                 unlink_chunk(fm, p, prevsize);
  4702               }
  4703               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4704                 fm->dvsize = psize;
  4705                 set_free_with_pinuse(p, psize, next);
  4706                 goto postaction;
  4707               }
  4708             }
  4709             else
  4710               goto erroraction;
  4711           }
  4712         }
  4713 
  4714         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4715           if (!cinuse(next)) {  /* consolidate forward */
  4716             if (next == fm->top) {
  4717               size_t tsize = fm->topsize += psize;
  4718               fm->top = p;
  4719               p->head = tsize | PINUSE_BIT;
  4720               if (p == fm->dv) {
  4721                 fm->dv = 0;
  4722                 fm->dvsize = 0;
  4723               }
  4724               if (should_trim(fm, tsize))
  4725                 sys_trim(fm, 0);
  4726               goto postaction;
  4727             }
  4728             else if (next == fm->dv) {
  4729               size_t dsize = fm->dvsize += psize;
  4730               fm->dv = p;
  4731               set_size_and_pinuse_of_free_chunk(p, dsize);
  4732               goto postaction;
  4733             }
  4734             else {
  4735               size_t nsize = chunksize(next);
  4736               psize += nsize;
  4737               unlink_chunk(fm, next, nsize);
  4738               set_size_and_pinuse_of_free_chunk(p, psize);
  4739               if (p == fm->dv) {
  4740                 fm->dvsize = psize;
  4741                 goto postaction;
  4742               }
  4743             }
  4744           }
  4745           else
  4746             set_free_with_pinuse(p, psize, next);
  4747           insert_chunk(fm, p, psize);
  4748           check_free_chunk(fm, p);
  4749           goto postaction;
  4750         }
  4751       }
  4752     erroraction:
  4753       USAGE_ERROR_ACTION(fm, p);
  4754     postaction:
  4755       POSTACTION(fm);
  4756     }
  4757   }
  4758 }
  4759 
  4760 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  4761   void* mem;
  4762   size_t req = 0;
  4763   mstate ms = (mstate)msp;
  4764   if (!ok_magic(ms)) {
  4765     USAGE_ERROR_ACTION(ms,ms);
  4766     return 0;
  4767   }
  4768   if (n_elements != 0) {
  4769     req = n_elements * elem_size;
  4770     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4771         (req / n_elements != elem_size))
  4772       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4773   }
  4774   mem = internal_malloc(ms, req);
  4775   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4776     memset(mem, 0, req);
  4777   return mem;
  4778 }
  4779 
  4780 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  4781   if (oldmem == 0)
  4782     return mspace_malloc(msp, bytes);
  4783 #ifdef REALLOC_ZERO_BYTES_FREES
  4784   if (bytes == 0) {
  4785     mspace_free(msp, oldmem);
  4786     return 0;
  4787   }
  4788 #endif /* REALLOC_ZERO_BYTES_FREES */
  4789   else {
  4790 #if FOOTERS
  4791     mchunkptr p  = mem2chunk(oldmem);
  4792     mstate ms = get_mstate_for(p);
  4793 #else /* FOOTERS */
  4794     mstate ms = (mstate)msp;
  4795 #endif /* FOOTERS */
  4796     if (!ok_magic(ms)) {
  4797       USAGE_ERROR_ACTION(ms,ms);
  4798       return 0;
  4799     }
  4800     return internal_realloc(ms, oldmem, bytes);
  4801   }
  4802 }
  4803 
  4804 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  4805   mstate ms = (mstate)msp;
  4806   if (!ok_magic(ms)) {
  4807     USAGE_ERROR_ACTION(ms,ms);
  4808     return 0;
  4809   }
  4810   return internal_memalign(ms, alignment, bytes);
  4811 }
  4812 
  4813 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  4814                                  size_t elem_size, void* chunks[]) {
  4815   size_t sz = elem_size; /* serves as 1-element array */
  4816   mstate ms = (mstate)msp;
  4817   if (!ok_magic(ms)) {
  4818     USAGE_ERROR_ACTION(ms,ms);
  4819     return 0;
  4820   }
  4821   return ialloc(ms, n_elements, &sz, 3, chunks);
  4822 }
  4823 
  4824 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  4825                                    size_t sizes[], void* chunks[]) {
  4826   mstate ms = (mstate)msp;
  4827   if (!ok_magic(ms)) {
  4828     USAGE_ERROR_ACTION(ms,ms);
  4829     return 0;
  4830   }
  4831   return ialloc(ms, n_elements, sizes, 0, chunks);
  4832 }
  4833 
  4834 int mspace_trim(mspace msp, size_t pad) {
  4835   int result = 0;
  4836   mstate ms = (mstate)msp;
  4837   if (ok_magic(ms)) {
  4838     if (!PREACTION(ms)) {
  4839       result = sys_trim(ms, pad);
  4840       POSTACTION(ms);
  4841     }
  4842   }
  4843   else {
  4844     USAGE_ERROR_ACTION(ms,ms);
  4845   }
  4846   return result;
  4847 }
  4848 
  4849 void mspace_malloc_stats(mspace msp) {
  4850   mstate ms = (mstate)msp;
  4851   if (ok_magic(ms)) {
  4852     internal_malloc_stats(ms);
  4853   }
  4854   else {
  4855     USAGE_ERROR_ACTION(ms,ms);
  4856   }
  4857 }
  4858 
  4859 size_t mspace_footprint(mspace msp) {
  4860   size_t result = 0;
  4861   mstate ms = (mstate)msp;
  4862   if (ok_magic(ms)) {
  4863     result = ms->footprint;
  4864   }
  4865   else { USAGE_ERROR_ACTION(ms,ms); }
  4866   return result;
  4867 }
  4868 
  4869 
  4870 size_t mspace_max_footprint(mspace msp) {
  4871   size_t result = 0;
  4872   mstate ms = (mstate)msp;
  4873   if (ok_magic(ms)) {
  4874     result = ms->max_footprint;
  4875   }
  4876   else { USAGE_ERROR_ACTION(ms,ms); }
  4877   return result;
  4878 }
  4879 
  4880 
  4881 #if !NO_MALLINFO
  4882 struct mallinfo mspace_mallinfo(mspace msp) {
  4883   mstate ms = (mstate)msp;
  4884   if (!ok_magic(ms)) {
  4885     USAGE_ERROR_ACTION(ms,ms);
  4886   }
  4887   return internal_mallinfo(ms);
  4888 }
  4889 #endif /* NO_MALLINFO */
  4890 
  4891 int mspace_mallopt(int param_number, int value) {
  4892   return change_mparam(param_number, value);
  4893 }
  4894 
  4895 #endif /* MSPACES */
  4896 
  4897 /* -------------------- Alternative MORECORE functions ------------------- */
  4898 
  4899 /*
  4900   Guidelines for creating a custom version of MORECORE:
  4901 
  4902   * For best performance, MORECORE should allocate in multiples of pagesize.
  4903   * MORECORE may allocate more memory than requested. (Or even less,
  4904       but this will usually result in a malloc failure.)
  4905   * MORECORE must not allocate memory when given argument zero, but
  4906       instead return one past the end address of memory from previous
  4907       nonzero call.
  4908   * For best performance, consecutive calls to MORECORE with positive
  4909       arguments should return increasing addresses, indicating that
  4910       space has been contiguously extended.
  4911   * Even though consecutive calls to MORECORE need not return contiguous
  4912       addresses, it must be OK for malloc'ed chunks to span multiple
  4913       regions in those cases where they do happen to be contiguous.
  4914   * MORECORE need not handle negative arguments -- it may instead
  4915       just return MFAIL when given negative arguments.
  4916       Negative arguments are always multiples of pagesize. MORECORE
  4917       must not misinterpret negative args as large positive unsigned
  4918       args. You can suppress all such calls from even occurring by defining
  4919       MORECORE_CANNOT_TRIM.
  4920 
  4921   As an example alternative MORECORE, here is a custom allocator
  4922   kindly contributed for pre-OSX macOS.  It uses virtually but not
  4923   necessarily physically contiguous non-paged memory (locked in,
  4924   present and won't get swapped out).  You can use it by uncommenting
  4925   this section, adding some #includes, and setting up the appropriate
  4926   defines above:
  4927 
  4928       #define MORECORE osMoreCore
  4929 
  4930   There is also a shutdown routine that should somehow be called for
  4931   cleanup upon program exit.
  4932 
  4933   #define MAX_POOL_ENTRIES 100
  4934   #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
  4935   static int next_os_pool;
  4936   void *our_os_pools[MAX_POOL_ENTRIES];
  4937 
  4938   void *osMoreCore(int size)
  4939   {
  4940     void *ptr = 0;
  4941     static void *sbrk_top = 0;
  4942 
  4943     if (size > 0)
  4944     {
  4945       if (size < MINIMUM_MORECORE_SIZE)
  4946          size = MINIMUM_MORECORE_SIZE;
  4947       if (CurrentExecutionLevel() == kTaskLevel)
  4948          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
  4949       if (ptr == 0)
  4950       {
  4951         return (void *) MFAIL;
  4952       }
  4953       // save ptrs so they can be freed during cleanup
  4954       our_os_pools[next_os_pool] = ptr;
  4955       next_os_pool++;
  4956       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
  4957       sbrk_top = (char *) ptr + size;
  4958       return ptr;
  4959     }
  4960     else if (size < 0)
  4961     {
  4962       // we don't currently support shrink behavior
  4963       return (void *) MFAIL;
  4964     }
  4965     else
  4966     {
  4967       return sbrk_top;
  4968     }
  4969   }
  4970 
  4971   // cleanup any allocated memory pools
  4972   // called as last thing before shutting down driver
  4973 
  4974   void osCleanupMem(void)
  4975   {
  4976     void **ptr;
  4977 
  4978     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
  4979       if (*ptr)
  4980       {
  4981          PoolDeallocate(*ptr);
  4982          *ptr = 0;
  4983       }
  4984   }
  4985 
  4986 */
  4987 
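/*
  As a second, simpler illustration of the guidelines above (a sketch
  only, with made-up names; not part of this file), a conforming
  MORECORE can be built on a fixed static arena:

      #define STATIC_ARENA_SIZE (1024 * 1024)
      static char static_arena[STATIC_ARENA_SIZE];
      static size_t static_brk = 0;

      void* staticMoreCore(int size)
      {
        if (size > 0) {
          if ((size_t)size > STATIC_ARENA_SIZE - static_brk)
            return (void*) MFAIL;          // arena exhausted
          static_brk += (size_t)size;
          return static_arena + (static_brk - (size_t)size);
        }
        else if (size < 0) {
          return (void*) MFAIL;            // trimming not supported
        }
        else {
          // size == 0: report the current break without allocating
          return static_arena + static_brk;
        }
      }

  together with the appropriate defines above, e.g.
      #define MORECORE staticMoreCore
      #define MORECORE_CANNOT_TRIM
      #define HAVE_MMAP 0
*/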
  4988 
  4989 /* -----------------------------------------------------------------------
  4990 History:
  4991     V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
  4992       * Add max_footprint functions
  4993       * Ensure all appropriate literals are size_t
  4994       * Fix conditional compilation problem for some #define settings
  4995       * Avoid concatenating segments with the one provided
  4996         in create_mspace_with_base
  4997       * Rename some variables to avoid compiler shadowing warnings
  4998       * Use explicit lock initialization.
  4999       * Better handling of sbrk interference.
  5000       * Simplify and fix segment insertion, trimming and mspace_destroy
  5001       * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
  5002       * Thanks especially to Dennis Flanagan for help on these.
  5003 
  5004     V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
  5005       * Fix memalign brace error.
  5006 
  5007     V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
  5008       * Fix improper #endif nesting in C++
  5009       * Add explicit casts needed for C++
  5010 
  5011     V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
  5012       * Use trees for large bins
  5013       * Support mspaces
  5014       * Use segments to unify sbrk-based and mmap-based system allocation,
  5015         removing need for emulation on most platforms without sbrk.
  5016       * Default safety checks
  5017       * Optional footer checks. Thanks to William Robertson for the idea.
  5018       * Internal code refactoring
  5019       * Incorporate suggestions and platform-specific changes.
  5020         Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
  5021         Aaron Bachmann,  Emery Berger, and others.
  5022       * Speed up non-fastbin processing enough to remove fastbins.
  5023       * Remove useless cfree() to avoid conflicts with other apps.
  5024       * Remove internal memcpy, memset. Compilers handle builtins better.
  5025       * Remove some options that no one ever used and rename others.
  5026 
  5027     V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
  5028       * Fix malloc_state bitmap array misdeclaration
  5029 
  5030     V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
  5031       * Allow tuning of FIRST_SORTED_BIN_SIZE
  5032       * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
  5033       * Better detection and support for non-contiguousness of MORECORE.
  5034         Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
  5035       * Bypass most of malloc if no frees. Thanks to Emery Berger.
  5036       * Fix freeing of old top non-contiguous chunk in sysmalloc.
  5037       * Raised default trim and map thresholds to 256K.
  5038       * Fix mmap-related #defines. Thanks to Lubos Lunak.
  5039       * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
  5040       * Branch-free bin calculation
  5041       * Default trim and mmap thresholds now 256K.
  5042 
  5043     V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
  5044       * Introduce independent_comalloc and independent_calloc.
  5045         Thanks to Michael Pachos for motivation and help.
  5046       * Make optional .h file available
  5047       * Allow > 2GB requests on 32bit systems.
  5048       * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
  5049         Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
  5050         and Anonymous.
  5051       * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
  5052         helping test this.)
  5053       * memalign: check alignment arg
  5054       * realloc: don't try to shift chunks backwards, since this
  5055         leads to  more fragmentation in some programs and doesn't
  5056         seem to help in any others.
  5057       * Collect all cases in malloc requiring system memory into sysmalloc
  5058       * Use mmap as backup to sbrk
  5059       * Place all internal state in malloc_state
  5060       * Introduce fastbins (although similar to 2.5.1)
  5061       * Many minor tunings and cosmetic improvements
  5062       * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
  5063       * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
  5064         Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
  5065       * Include errno.h to support default failure action.
  5066 
  5067     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
  5068       * return null for negative arguments
  5069       * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
  5070          * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
  5071           (e.g. WIN32 platforms)
  5072          * Cleanup header file inclusion for WIN32 platforms
  5073          * Cleanup code to avoid Microsoft Visual C++ compiler complaints
  5074          * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
  5075            memory allocation routines
  5076          * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
  5077          * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
  5078            usage of 'assert' in non-WIN32 code
  5079          * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
  5080            avoid infinite loop
  5081       * Always call 'fREe()' rather than 'free()'
  5082 
  5083     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
  5084       * Fixed ordering problem with boundary-stamping
  5085 
  5086     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
  5087       * Added pvalloc, as recommended by H.J. Liu
  5088       * Added 64bit pointer support mainly from Wolfram Gloger
  5089       * Added anonymously donated WIN32 sbrk emulation
  5090       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
  5091       * malloc_extend_top: fix mask error that caused wastage after
  5092         foreign sbrks
  5093       * Add linux mremap support code from HJ Liu
  5094 
  5095     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
  5096       * Integrated most documentation with the code.
  5097       * Add support for mmap, with help from
  5098         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5099       * Use last_remainder in more cases.
  5100       * Pack bins using idea from  colin@nyx10.cs.du.edu
  5101       * Use ordered bins instead of best-fit threshold
  5102       * Eliminate block-local decls to simplify tracing and debugging.
  5103       * Support another case of realloc via move into top
  5104       * Fix error occurring when initial sbrk_base not word-aligned.
  5105       * Rely on page size for units instead of SBRK_UNIT to
  5106         avoid surprises about sbrk alignment conventions.
  5107       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
  5108         (raymond@es.ele.tue.nl) for the suggestion.
  5109       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
  5110       * More precautions for cases where other routines call sbrk,
  5111         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5112       * Added macros etc., allowing use in linux libc from
  5113         H.J. Lu (hjl@gnu.ai.mit.edu)
  5114       * Inverted this history list
  5115 
  5116     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
  5117       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
  5118       * Removed all preallocation code since under current scheme
  5119         the work required to undo bad preallocations exceeds
  5120         the work saved in good cases for most test programs.
  5121       * No longer use return list or unconsolidated bins since
  5122         no scheme using them consistently outperforms those that don't
  5123         given above changes.
  5124       * Use best fit for very large chunks to prevent some worst-cases.
  5125       * Added some support for debugging
  5126 
  5127     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
  5128       * Removed footers when chunks are in use. Thanks to
  5129         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
  5130 
  5131     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
  5132       * Added malloc_trim, with help from Wolfram Gloger
  5133         (wmglo@Dent.MED.Uni-Muenchen.DE).
  5134 
  5135     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
  5136 
  5137     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
  5138       * realloc: try to expand in both directions
  5139       * malloc: swap order of clean-bin strategy;
  5140       * realloc: only conditionally expand backwards
  5141       * Try not to scavenge used bins
  5142       * Use bin counts as a guide to preallocation
  5143       * Occasionally bin return list chunks in first scan
  5144       * Add a few optimizations from colin@nyx10.cs.du.edu
  5145 
  5146     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
  5147       * faster bin computation & slightly different binning
  5148       * merged all consolidations to one part of malloc proper
  5149          (eliminating old malloc_find_space & malloc_clean_bin)
  5150       * Scan 2 returns chunks (not just 1)
  5151       * Propagate failure in realloc if malloc returns 0
  5152       * Add stuff to allow compilation on non-ANSI compilers
  5153           from kpv@research.att.com
  5154 
  5155     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
  5156       * removed potential for odd address access in prev_chunk
  5157       * removed dependency on getpagesize.h
  5158       * misc cosmetics and a bit more internal documentation
  5159       * anticosmetics: mangled names in macros to evade debugger strangeness
  5160       * tested on sparc, hp-700, dec-mips, rs6000
  5161           with gcc & native cc (hp, dec only) allowing
  5162           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
  5163 
  5164     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
  5165       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
  5166          structure of old version,  but most details differ.)
  5167  
  5168 */
  5169 
  5170 #endif /* !HAVE_MALLOC */