src/stdlib/SDL_malloc.c
author Sam Lantinga
Tue, 07 Feb 2006 06:59:48 +0000
changeset 1336 3692456e7b0f
parent 1333 39b0d60d3f50
child 1338 604d73db6802
permissions -rw-r--r--
Use SDL_ prefixed versions of C library functions.
FIXME:
Change #include <stdlib.h> to #include "SDL_stdlib.h"
Change #include <string.h> to #include "SDL_string.h"
Make sure nothing else broke because of this...
     1 /*
     2     SDL - Simple DirectMedia Layer
     3     Copyright (C) 1997-2006 Sam Lantinga
     4 
     5     This library is free software; you can redistribute it and/or
     6     modify it under the terms of the GNU Lesser General Public
     7     License as published by the Free Software Foundation; either
     8     version 2.1 of the License, or (at your option) any later version.
     9 
    10     This library is distributed in the hope that it will be useful,
    11     but WITHOUT ANY WARRANTY; without even the implied warranty of
    12     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    13     Lesser General Public License for more details.
    14 
    15     You should have received a copy of the GNU Lesser General Public
    16     License along with this library; if not, write to the Free Software
    17     Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    18 
    19     Sam Lantinga
    20     slouken@libsdl.org
    21 */
    22 
    23 
    24 /* This file contains portable memory management functions for SDL */
    25 
    26 #include "SDL_stdlib.h"
    27 #include "SDL_string.h"
    28 
    29 #ifndef HAVE_MALLOC
    30 
    31 #define LACKS_STDIO_H
    32 #define LACKS_UNISTD_H
    33 #define LACKS_FCNTL_H
    34 #define LACKS_SYS_PARAM_H
    35 #define LACKS_SYS_MMAN_H
    36 #define LACKS_STRINGS_H
    37 #define LACKS_STRING_H
    38 #define LACKS_SYS_TYPES_H
    39 #define LACKS_ERRNO_H
    40 #define LACKS_STDLIB_H
    41 #define ABORT
    42 #define memset	SDL_memset
    43 #define memcpy	SDL_memcpy
    44 #define malloc	SDL_malloc
    45 #define realloc	SDL_realloc
    46 #define free	SDL_free
    47 
    48 /*
    49   This is a version (aka dlmalloc) of malloc/free/realloc written by
    50   Doug Lea and released to the public domain, as explained at
    51   http://creativecommons.org/licenses/publicdomain.  Send questions,
    52   comments, complaints, performance data, etc to dl@cs.oswego.edu
    53 
    54 * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
    55 
    56    Note: There may be an updated version of this malloc obtainable at
    57            ftp://gee.cs.oswego.edu/pub/misc/malloc.c
    58          Check before installing!
    59 
    60 * Quickstart
    61 
    62   This library is all in one file to simplify the most common usage:
    63   ftp it, compile it (-O3), and link it into another program. All of
    64   the compile-time options default to reasonable values for use on
    65   most platforms.  You might later want to step through various
    66   compile-time and dynamic tuning options.
    67 
    68   For convenience, an include file for code using this malloc is at:
    69      ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
    70   You don't really need this .h file unless you call functions not
    71   defined in your system include files.  The .h file contains only the
    72   excerpts from this file needed for using this malloc on ANSI C/C++
    73   systems, so long as you haven't changed compile-time options about
    74   naming and tuning parameters.  If you do, then you can create your
    75   own malloc.h that does include all settings by cutting at the point
    76   indicated below. Note that you may already by default be using a C
    77   library containing a malloc that is based on some version of this
    78   malloc (for example in linux). You might still want to use the one
    79   in this file to customize settings or to avoid overheads associated
    80   with library versions.
    81 
    82 * Vital statistics:
    83 
    84   Supported pointer/size_t representation:       4 or 8 bytes
    85        size_t MUST be an unsigned type of the same width as
    86        pointers. (If you are using an ancient system that declares
    87        size_t as a signed type, or need it to be a different width
    88        than pointers, you can use a previous release of this malloc
    89        (e.g. 2.7.2) supporting these.)
    90 
    91   Alignment:                                     8 bytes (default)
    92        This suffices for nearly all current machines and C compilers.
    93        However, you can define MALLOC_ALIGNMENT to be wider than this
     94        if necessary (up to 128 bytes), at the expense of using more space.
    95 
    96   Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
    97                                           8 or 16 bytes (if 8byte sizes)
    98        Each malloced chunk has a hidden word of overhead holding size
     99        and status information, and an additional cross-check word
   100        if FOOTERS is defined.
   101 
   102   Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
   103                           8-byte ptrs:  32 bytes    (including overhead)
   104 
   105        Even a request for zero bytes (i.e., malloc(0)) returns a
   106        pointer to something of the minimum allocatable size.
   107        The maximum overhead wastage (i.e., number of extra bytes
    108        allocated beyond those requested in malloc) is less than or equal
   109        to the minimum size, except for requests >= mmap_threshold that
   110        are serviced via mmap(), where the worst case wastage is about
   111        32 bytes plus the remainder from a system page (the minimal
   112        mmap unit); typically 4096 or 8192 bytes.
   113 
   114   Security: static-safe; optionally more or less
   115        The "security" of malloc refers to the ability of malicious
   116        code to accentuate the effects of errors (for example, freeing
   117        space that is not currently malloc'ed or overwriting past the
   118        ends of chunks) in code that calls malloc.  This malloc
   119        guarantees not to modify any memory locations below the base of
   120        heap, i.e., static variables, even in the presence of usage
   121        errors.  The routines additionally detect most improper frees
   122        and reallocs.  All this holds as long as the static bookkeeping
   123        for malloc itself is not corrupted by some other means.  This
   124        is only one aspect of security -- these checks do not, and
   125        cannot, detect all possible programming errors.
   126 
   127        If FOOTERS is defined nonzero, then each allocated chunk
   128        carries an additional check word to verify that it was malloced
   129        from its space.  These check words are the same within each
   130        execution of a program using malloc, but differ across
   131        executions, so externally crafted fake chunks cannot be
   132        freed. This improves security by rejecting frees/reallocs that
   133        could corrupt heap memory, in addition to the checks preventing
   134        writes to statics that are always on.  This may further improve
   135        security at the expense of time and space overhead.  (Note that
   136        FOOTERS may also be worth using with MSPACES.)
   137 
   138        By default detected errors cause the program to abort (calling
   139        "abort()"). You can override this to instead proceed past
   140        errors by defining PROCEED_ON_ERROR.  In this case, a bad free
   141        has no effect, and a malloc that encounters a bad address
   142        caused by user overwrites will ignore the bad address by
   143        dropping pointers and indices to all known memory. This may
   144        be appropriate for programs that should continue if at all
   145        possible in the face of programming errors, although they may
   146        run out of memory because dropped memory is never reclaimed.
   147 
   148        If you don't like either of these options, you can define
   149        CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
    150   else. And if you are sure that your program using malloc has
   151        no errors or vulnerabilities, you can define INSECURE to 1,
   152        which might (or might not) provide a small performance improvement.
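
       As a rough sketch, overriding the error actions might look like the
       following (my_report_heap_error is a hypothetical handler, not part
       of this file; the macro arguments match the definitions that appear
       later in this source):

         #define CORRUPTION_ERROR_ACTION(m)   my_report_heap_error(m)
         #define USAGE_ERROR_ACTION(m, p)     my_report_heap_error(m)

       where m is the internal malloc state and p is the offending address.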
   153 
   154   Thread-safety: NOT thread-safe unless USE_LOCKS defined
   155        When USE_LOCKS is defined, each public call to malloc, free,
   156        etc is surrounded with either a pthread mutex or a win32
   157        spinlock (depending on WIN32). This is not especially fast, and
   158        can be a major bottleneck.  It is designed only to provide
   159        minimal protection in concurrent environments, and to provide a
   160        basis for extensions.  If you are using malloc in a concurrent
   161        program, consider instead using ptmalloc, which is derived from
   162        a version of this malloc. (See http://www.malloc.de).
   163 
   164   System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
   165        This malloc can use unix sbrk or any emulation (invoked using
   166        the CALL_MORECORE macro) and/or mmap/munmap or any emulation
   167        (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
   168        memory.  On most unix systems, it tends to work best if both
   169        MORECORE and MMAP are enabled.  On Win32, it uses emulations
   170        based on VirtualAlloc. It also uses common C library functions
   171        like memset.
   172 
   173   Compliance: I believe it is compliant with the Single Unix Specification
   174        (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
   175        others as well.
   176 
   177 * Overview of algorithms
   178 
   179   This is not the fastest, most space-conserving, most portable, or
   180   most tunable malloc ever written. However it is among the fastest
   181   while also being among the most space-conserving, portable and
   182   tunable.  Consistent balance across these factors results in a good
   183   general-purpose allocator for malloc-intensive programs.
   184 
   185   In most ways, this malloc is a best-fit allocator. Generally, it
   186   chooses the best-fitting existing chunk for a request, with ties
   187   broken in approximately least-recently-used order. (This strategy
   188   normally maintains low fragmentation.) However, for requests less
    189   than 256 bytes, it deviates from best-fit when there is not an
   190   exactly fitting available chunk by preferring to use space adjacent
   191   to that used for the previous small request, as well as by breaking
   192   ties in approximately most-recently-used order. (These enhance
   193   locality of series of small allocations.)  And for very large requests
   194   (>= 256Kb by default), it relies on system memory mapping
   195   facilities, if supported.  (This helps avoid carrying around and
   196   possibly fragmenting memory used only for large chunks.)
   197 
   198   All operations (except malloc_stats and mallinfo) have execution
   199   times that are bounded by a constant factor of the number of bits in
   200   a size_t, not counting any clearing in calloc or copying in realloc,
   201   or actions surrounding MORECORE and MMAP that have times
   202   proportional to the number of non-contiguous regions returned by
   203   system allocation routines, which is often just 1.
   204 
   205   The implementation is not very modular and seriously overuses
   206   macros. Perhaps someday all C compilers will do as good a job
   207   inlining modular code as can now be done by brute-force expansion,
   208   but now, enough of them seem not to.
   209 
   210   Some compilers issue a lot of warnings about code that is
   211   dead/unreachable only on some platforms, and also about intentional
   212   uses of negation on unsigned types. All known cases of each can be
   213   ignored.
   214 
   215   For a longer but out of date high-level description, see
   216      http://gee.cs.oswego.edu/dl/html/malloc.html
   217 
   218 * MSPACES
   219   If MSPACES is defined, then in addition to malloc, free, etc.,
   220   this file also defines mspace_malloc, mspace_free, etc. These
   221   are versions of malloc routines that take an "mspace" argument
   222   obtained using create_mspace, to control all internal bookkeeping.
   223   If ONLY_MSPACES is defined, only these versions are compiled.
   224   So if you would like to use this allocator for only some allocations,
   225   and your system malloc for others, you can compile with
   226   ONLY_MSPACES and then do something like...
   227     static mspace mymspace = create_mspace(0,0); // for example
   228     #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
   229 
   230   (Note: If you only need one instance of an mspace, you can instead
   231   use "USE_DL_PREFIX" to relabel the global malloc.)
   232 
   233   You can similarly create thread-local allocators by storing
   234   mspaces as thread-locals. For example:
   235     static __thread mspace tlms = 0;
   236     void*  tlmalloc(size_t bytes) {
   237       if (tlms == 0) tlms = create_mspace(0, 0);
   238       return mspace_malloc(tlms, bytes);
   239     }
   240     void  tlfree(void* mem) { mspace_free(tlms, mem); }
   241 
   242   Unless FOOTERS is defined, each mspace is completely independent.
   243   You cannot allocate from one and free to another (although
   244   conformance is only weakly checked, so usage errors are not always
   245   caught). If FOOTERS is defined, then each chunk carries around a tag
   246   indicating its originating mspace, and frees are directed to their
   247   originating spaces.
   248 
   249  -------------------------  Compile-time options ---------------------------
   250 
   251 Be careful in setting #define values for numerical constants of type
   252 size_t. On some systems, literal values are not automatically extended
    253 to size_t precision unless they are explicitly cast.
   254 
   255 WIN32                    default: defined if _WIN32 defined
   256   Defining WIN32 sets up defaults for MS environment and compilers.
   257   Otherwise defaults are for unix.
   258 
   259 MALLOC_ALIGNMENT         default: (size_t)8
   260   Controls the minimum alignment for malloc'ed chunks.  It must be a
   261   power of two and at least 8, even on machines for which smaller
   262   alignments would suffice. It may be defined as larger than this
   263   though. Note however that code and data structures are optimized for
   264   the case of 8-byte alignment.
   265 
   266 MSPACES                  default: 0 (false)
   267   If true, compile in support for independent allocation spaces.
   268   This is only supported if HAVE_MMAP is true.
   269 
   270 ONLY_MSPACES             default: 0 (false)
   271   If true, only compile in mspace versions, not regular versions.
   272 
   273 USE_LOCKS                default: 0 (false)
   274   Causes each call to each public routine to be surrounded with
   275   pthread or WIN32 mutex lock/unlock. (If set true, this can be
   276   overridden on a per-mspace basis for mspace versions.)
   277 
   278 FOOTERS                  default: 0
   279   If true, provide extra checking and dispatching by placing
   280   information in the footers of allocated chunks. This adds
   281   space and time overhead.
   282 
   283 INSECURE                 default: 0
   284   If true, omit checks for usage errors and heap space overwrites.
   285 
   286 USE_DL_PREFIX            default: NOT defined
   287   Causes compiler to prefix all public routines with the string 'dl'.
   288   This can be useful when you only want to use this malloc in one part
   289   of a program, using your regular system malloc elsewhere.
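
  For example (a minimal sketch, assuming USE_DL_PREFIX is defined at
  compile time so this allocator keeps its dl-prefixed names):

    void* big   = dlmalloc(1024 * 1024);  // served by this allocator
    void* small = malloc(64);             // served by the system malloc
    free(small);
    dlfree(big);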
   290 
   291 ABORT                    default: defined as abort()
   292   Defines how to abort on failed checks.  On most systems, a failed
   293   check cannot die with an "assert" or even print an informative
   294   message, because the underlying print routines in turn call malloc,
   295   which will fail again.  Generally, the best policy is to simply call
   296   abort(). It's not very useful to do more than this because many
   297   errors due to overwriting will show up as address faults (null, odd
   298   addresses etc) rather than malloc-triggered checks, so will also
   299   abort.  Also, most compilers know that abort() does not return, so
   300   can better optimize code conditionally calling it.
   301 
   302 PROCEED_ON_ERROR           default: defined as 0 (false)
    303   Controls whether detected bad addresses cause them to be bypassed
   304   rather than aborting. If set, detected bad arguments to free and
   305   realloc are ignored. And all bookkeeping information is zeroed out
   306   upon a detected overwrite of freed heap space, thus losing the
   307   ability to ever return it from malloc again, but enabling the
   308   application to proceed. If PROCEED_ON_ERROR is defined, the
   309   static variable malloc_corruption_error_count is compiled in
   310   and can be examined to see if errors have occurred. This option
   311   generates slower code than the default abort policy.
   312 
   313 DEBUG                    default: NOT defined
   314   The DEBUG setting is mainly intended for people trying to modify
   315   this code or diagnose problems when porting to new platforms.
   316   However, it may also be able to better isolate user errors than just
   317   using runtime checks.  The assertions in the check routines spell
   318   out in more detail the assumptions and invariants underlying the
   319   algorithms.  The checking is fairly extensive, and will slow down
   320   execution noticeably. Calling malloc_stats or mallinfo with DEBUG
   321   set will attempt to check every non-mmapped allocated and free chunk
   322   in the course of computing the summaries.
   323 
   324 ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
   325   Debugging assertion failures can be nearly impossible if your
   326   version of the assert macro causes malloc to be called, which will
   327   lead to a cascade of further failures, blowing the runtime stack.
    328   ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
   329   which will usually make debugging easier.
   330 
   331 MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
    332   The action to take before "return 0" when malloc is unable to
    333   return memory because none is available.
   334 
   335 HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
   336   True if this system supports sbrk or an emulation of it.
   337 
   338 MORECORE                  default: sbrk
   339   The name of the sbrk-style system routine to call to obtain more
   340   memory.  See below for guidance on writing custom MORECORE
   341   functions. The type of the argument to sbrk/MORECORE varies across
   342   systems.  It cannot be size_t, because it supports negative
   343   arguments, so it is normally the signed type of the same width as
   344   size_t (sometimes declared as "intptr_t").  It doesn't much matter
   345   though. Internally, we only call it with arguments less than half
   346   the max value of a size_t, which should work across all reasonable
   347   possibilities, although sometimes generating compiler warnings.  See
   348   near the end of this file for guidelines for creating a custom
   349   version of MORECORE.
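
  As a minimal sketch, a custom MORECORE backed by a fixed static arena
  might look like the following (the names and arena size are illustrative
  only; an sbrk-style routine returns (void*)-1 on failure):

    static char my_arena[4 * 1024 * 1024];
    static size_t my_arena_used = 0;
    void* my_morecore(ptrdiff_t increment) {
      char* prev = my_arena + my_arena_used;
      if (increment < 0 ||
          (size_t)increment > sizeof(my_arena) - my_arena_used)
        return (void*)-1;               // cannot trim or grow any further
      my_arena_used += (size_t)increment;
      return prev;                      // old break, as sbrk does
    }
    // then build with, e.g.:
    //   -DMORECORE=my_morecore -DMORECORE_CANNOT_TRIM -DHAVE_MMAP=0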
   350 
   351 MORECORE_CONTIGUOUS       default: 1 (true)
   352   If true, take advantage of fact that consecutive calls to MORECORE
   353   with positive arguments always return contiguous increasing
   354   addresses.  This is true of unix sbrk. It does not hurt too much to
   355   set it true anyway, since malloc copes with non-contiguities.
    356   Setting it false when MORECORE is definitely non-contiguous, however,
    357   saves the time and possibly wasted space it would take to discover this.
   358 
   359 MORECORE_CANNOT_TRIM      default: NOT defined
   360   True if MORECORE cannot release space back to the system when given
   361   negative arguments. This is generally necessary only if you are
   362   using a hand-crafted MORECORE function that cannot handle negative
   363   arguments.
   364 
   365 HAVE_MMAP                 default: 1 (true)
   366   True if this system supports mmap or an emulation of it.  If so, and
   367   HAVE_MORECORE is not true, MMAP is used for all system
   368   allocation. If set and HAVE_MORECORE is true as well, MMAP is
   369   primarily used to directly allocate very large blocks. It is also
   370   used as a backup strategy in cases where MORECORE fails to provide
   371   space from system. Note: A single call to MUNMAP is assumed to be
    372   able to unmap memory that may have been allocated using multiple calls
   373   to MMAP, so long as they are adjacent.
   374 
   375 HAVE_MREMAP               default: 1 on linux, else 0
   376   If true realloc() uses mremap() to re-allocate large blocks and
   377   extend or shrink allocation spaces.
   378 
   379 MMAP_CLEARS               default: 1 on unix
   380   True if mmap clears memory so calloc doesn't need to. This is true
   381   for standard unix mmap using /dev/zero.
   382 
   383 USE_BUILTIN_FFS            default: 0 (i.e., not used)
   384   Causes malloc to use the builtin ffs() function to compute indices.
   385   Some compilers may recognize and intrinsify ffs to be faster than the
   386   supplied C version. Also, the case of x86 using gcc is special-cased
   387   to an asm instruction, so is already as fast as it can be, and so
   388   this setting has no effect. (On most x86s, the asm version is only
   389   slightly faster than the C version.)
   390 
   391 malloc_getpagesize         default: derive from system includes, or 4096.
   392   The system page size. To the extent possible, this malloc manages
   393   memory from the system in page-size units.  This may be (and
   394   usually is) a function rather than a constant. This is ignored
    395   if WIN32, where page size is determined using GetSystemInfo during
   396   initialization.
   397 
   398 USE_DEV_RANDOM             default: 0 (i.e., not used)
   399   Causes malloc to use /dev/random to initialize secure magic seed for
   400   stamping footers. Otherwise, the current time is used.
   401 
   402 NO_MALLINFO                default: 0
   403   If defined, don't compile "mallinfo". This can be a simple way
   404   of dealing with mismatches between system declarations and
   405   those in this file.
   406 
   407 MALLINFO_FIELD_TYPE        default: size_t
   408   The type of the fields in the mallinfo struct. This was originally
   409   defined as "int" in SVID etc, but is more usefully defined as
    410   size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
   411 
   412 REALLOC_ZERO_BYTES_FREES    default: not defined
   413   This should be set if a call to realloc with zero bytes should 
   414   be the same as a call to free. Some people think it should. Otherwise, 
   415   since this malloc returns a unique pointer for malloc(0), so does 
   416   realloc(p, 0).
   417 
   418 LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
   419 LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
   420 LACKS_STDLIB_H                default: NOT defined unless on WIN32
   421   Define these if your system does not have these header files.
   422   You might need to manually insert some of the declarations they provide.
   423 
   424 DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
   425                                 system_info.dwAllocationGranularity in WIN32,
   426                                 otherwise 64K.
   427       Also settable using mallopt(M_GRANULARITY, x)
   428   The unit for allocating and deallocating memory from the system.  On
   429   most systems with contiguous MORECORE, there is no reason to
   430   make this more than a page. However, systems with MMAP tend to
   431   either require or encourage larger granularities.  You can increase
    432   this value to prevent system allocation functions from being called so
   433   often, especially if they are slow.  The value must be at least one
   434   page and must be a power of two.  Setting to 0 causes initialization
   435   to either page size or win32 region size.  (Note: In previous
   436   versions of malloc, the equivalent of this option was called
   437   "TOP_PAD")
   438 
   439 DEFAULT_TRIM_THRESHOLD    default: 2MB
   440       Also settable using mallopt(M_TRIM_THRESHOLD, x)
   441   The maximum amount of unused top-most memory to keep before
   442   releasing via malloc_trim in free().  Automatic trimming is mainly
   443   useful in long-lived programs using contiguous MORECORE.  Because
   444   trimming via sbrk can be slow on some systems, and can sometimes be
   445   wasteful (in cases where programs immediately afterward allocate
   446   more large chunks) the value should be high enough so that your
   447   overall system performance would improve by releasing this much
   448   memory.  As a rough guide, you might set to a value close to the
   449   average size of a process (program) running on your system.
   450   Releasing this much memory would allow such a process to run in
   451   memory.  Generally, it is worth tuning trim thresholds when a
   452   program undergoes phases where several large chunks are allocated
   453   and released in ways that can reuse each other's storage, perhaps
   454   mixed with phases where there are no such chunks at all. The trim
   455   value must be greater than page size to have any useful effect.  To
    456   disable trimming completely, you can set it to MAX_SIZE_T. Note that the trick
   457   some people use of mallocing a huge space and then freeing it at
   458   program startup, in an attempt to reserve system memory, doesn't
   459   have the intended effect under automatic trimming, since that memory
   460   will immediately be returned to the system.
   461 
   462 DEFAULT_MMAP_THRESHOLD       default: 256K
   463       Also settable using mallopt(M_MMAP_THRESHOLD, x)
   464   The request size threshold for using MMAP to directly service a
   465   request. Requests of at least this size that cannot be allocated
   466   using already-existing space will be serviced via mmap.  (If enough
   467   normal freed space already exists it is used instead.)  Using mmap
   468   segregates relatively large chunks of memory so that they can be
   469   individually obtained and released from the host system. A request
   470   serviced through mmap is never reused by any other request (at least
   471   not directly; the system may just so happen to remap successive
   472   requests to the same locations).  Segregating space in this way has
   473   the benefits that: Mmapped space can always be individually released
   474   back to the system, which helps keep the system level memory demands
   475   of a long-lived program low.  Also, mapped memory doesn't become
   476   `locked' between other chunks, as can happen with normally allocated
   477   chunks, which means that even trimming via malloc_trim would not
   478   release them.  However, it has the disadvantage that the space
   479   cannot be reclaimed, consolidated, and then used to service later
   480   requests, as happens with normal chunks.  The advantages of mmap
   481   nearly always outweigh disadvantages for "large" chunks, but the
   482   value of "large" may vary across systems.  The default is an
   483   empirically derived value that works well in most systems. You can
    484   disable mmap by setting it to MAX_SIZE_T.
   485 
   486 */
   487 
   488 #ifndef WIN32
   489 #ifdef _WIN32
   490 #define WIN32 1
   491 #endif  /* _WIN32 */
   492 #endif  /* WIN32 */
   493 #ifdef WIN32
   494 #include "SDL_windows.h"
   495 #define HAVE_MMAP 1
   496 #define HAVE_MORECORE 0
   497 #define LACKS_UNISTD_H
   498 #define LACKS_SYS_PARAM_H
   499 #define LACKS_SYS_MMAN_H
   500 #define LACKS_STRING_H
   501 #define LACKS_STRINGS_H
   502 #define LACKS_SYS_TYPES_H
   503 #define LACKS_ERRNO_H
   504 #define MALLOC_FAILURE_ACTION
   505 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
   506 #endif  /* WIN32 */
   507 
   508 #if defined(DARWIN) || defined(_DARWIN)
   509 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
   510 #ifndef HAVE_MORECORE
   511 #define HAVE_MORECORE 0
   512 #define HAVE_MMAP 1
   513 #endif  /* HAVE_MORECORE */
   514 #endif  /* DARWIN */
   515 
   516 #ifndef LACKS_SYS_TYPES_H
   517 #include <sys/types.h>  /* For size_t */
   518 #endif  /* LACKS_SYS_TYPES_H */
   519 
   520 /* The maximum possible size_t value has all bits set */
   521 #define MAX_SIZE_T           (~(size_t)0)
   522 
   523 #ifndef ONLY_MSPACES
   524 #define ONLY_MSPACES 0
   525 #endif  /* ONLY_MSPACES */
   526 #ifndef MSPACES
   527 #if ONLY_MSPACES
   528 #define MSPACES 1
   529 #else   /* ONLY_MSPACES */
   530 #define MSPACES 0
   531 #endif  /* ONLY_MSPACES */
   532 #endif  /* MSPACES */
   533 #ifndef MALLOC_ALIGNMENT
   534 #define MALLOC_ALIGNMENT ((size_t)8U)
   535 #endif  /* MALLOC_ALIGNMENT */
   536 #ifndef FOOTERS
   537 #define FOOTERS 0
   538 #endif  /* FOOTERS */
   539 #ifndef ABORT
   540 #define ABORT  abort()
   541 #endif  /* ABORT */
   542 #ifndef ABORT_ON_ASSERT_FAILURE
   543 #define ABORT_ON_ASSERT_FAILURE 1
   544 #endif  /* ABORT_ON_ASSERT_FAILURE */
   545 #ifndef PROCEED_ON_ERROR
   546 #define PROCEED_ON_ERROR 0
   547 #endif  /* PROCEED_ON_ERROR */
   548 #ifndef USE_LOCKS
   549 #define USE_LOCKS 0
   550 #endif  /* USE_LOCKS */
   551 #ifndef INSECURE
   552 #define INSECURE 0
   553 #endif  /* INSECURE */
   554 #ifndef HAVE_MMAP
   555 #define HAVE_MMAP 1
   556 #endif  /* HAVE_MMAP */
   557 #ifndef MMAP_CLEARS
   558 #define MMAP_CLEARS 1
   559 #endif  /* MMAP_CLEARS */
   560 #ifndef HAVE_MREMAP
   561 #ifdef linux
   562 #define HAVE_MREMAP 1
   563 #else   /* linux */
   564 #define HAVE_MREMAP 0
   565 #endif  /* linux */
   566 #endif  /* HAVE_MREMAP */
   567 #ifndef MALLOC_FAILURE_ACTION
   568 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
   569 #endif  /* MALLOC_FAILURE_ACTION */
   570 #ifndef HAVE_MORECORE
   571 #if ONLY_MSPACES
   572 #define HAVE_MORECORE 0
   573 #else   /* ONLY_MSPACES */
   574 #define HAVE_MORECORE 1
   575 #endif  /* ONLY_MSPACES */
   576 #endif  /* HAVE_MORECORE */
   577 #if !HAVE_MORECORE
   578 #define MORECORE_CONTIGUOUS 0
   579 #else   /* !HAVE_MORECORE */
   580 #ifndef MORECORE
   581 #define MORECORE sbrk
   582 #endif  /* MORECORE */
   583 #ifndef MORECORE_CONTIGUOUS
   584 #define MORECORE_CONTIGUOUS 1
   585 #endif  /* MORECORE_CONTIGUOUS */
   586 #endif  /* HAVE_MORECORE */
   587 #ifndef DEFAULT_GRANULARITY
   588 #if MORECORE_CONTIGUOUS
   589 #define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
   590 #else   /* MORECORE_CONTIGUOUS */
   591 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
   592 #endif  /* MORECORE_CONTIGUOUS */
   593 #endif  /* DEFAULT_GRANULARITY */
   594 #ifndef DEFAULT_TRIM_THRESHOLD
   595 #ifndef MORECORE_CANNOT_TRIM
   596 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
   597 #else   /* MORECORE_CANNOT_TRIM */
   598 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
   599 #endif  /* MORECORE_CANNOT_TRIM */
   600 #endif  /* DEFAULT_TRIM_THRESHOLD */
   601 #ifndef DEFAULT_MMAP_THRESHOLD
   602 #if HAVE_MMAP
   603 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
   604 #else   /* HAVE_MMAP */
   605 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
   606 #endif  /* HAVE_MMAP */
   607 #endif  /* DEFAULT_MMAP_THRESHOLD */
   608 #ifndef USE_BUILTIN_FFS
   609 #define USE_BUILTIN_FFS 0
   610 #endif  /* USE_BUILTIN_FFS */
   611 #ifndef USE_DEV_RANDOM
   612 #define USE_DEV_RANDOM 0
   613 #endif  /* USE_DEV_RANDOM */
   614 #ifndef NO_MALLINFO
   615 #define NO_MALLINFO 0
   616 #endif  /* NO_MALLINFO */
   617 #ifndef MALLINFO_FIELD_TYPE
   618 #define MALLINFO_FIELD_TYPE size_t
   619 #endif  /* MALLINFO_FIELD_TYPE */
   620 
   621 /*
   622   mallopt tuning options.  SVID/XPG defines four standard parameter
   623   numbers for mallopt, normally defined in malloc.h.  None of these
   624   are used in this malloc, so setting them has no effect. But this
   625   malloc does support the following options.
   626 */
   627 
   628 #define M_TRIM_THRESHOLD     (-1)
   629 #define M_GRANULARITY        (-2)
   630 #define M_MMAP_THRESHOLD     (-3)
   631 
   632 /* ------------------------ Mallinfo declarations ------------------------ */
   633 
   634 #if !NO_MALLINFO
   635 /*
   636   This version of malloc supports the standard SVID/XPG mallinfo
   637   routine that returns a struct containing usage properties and
   638   statistics. It should work on any system that has a
   639   /usr/include/malloc.h defining struct mallinfo.  The main
   640   declaration needed is the mallinfo struct that is returned (by-copy)
    641   by mallinfo().  The mallinfo struct contains a bunch of fields that
    642   are not even meaningful in this version of malloc.  These fields
   643   are instead filled by mallinfo() with other numbers that might be of
   644   interest.
   645 
   646   HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
   647   /usr/include/malloc.h file that includes a declaration of struct
   648   mallinfo.  If so, it is included; else a compliant version is
   649   declared below.  These must be precisely the same for mallinfo() to
   650   work.  The original SVID version of this struct, defined on most
   651   systems with mallinfo, declares all fields as ints. But some others
    652   define them as unsigned long. If your system defines the fields using a
   653   type of different width than listed here, you MUST #include your
   654   system version and #define HAVE_USR_INCLUDE_MALLOC_H.
   655 */
   656 
   657 /* #define HAVE_USR_INCLUDE_MALLOC_H */
   658 
   659 #ifdef HAVE_USR_INCLUDE_MALLOC_H
   660 #include "/usr/include/malloc.h"
   661 #else /* HAVE_USR_INCLUDE_MALLOC_H */
   662 
   663 struct mallinfo {
   664   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
   665   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
   666   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
   667   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
   668   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
   669   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
   670   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
   671   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
   672   MALLINFO_FIELD_TYPE fordblks; /* total free space */
   673   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
   674 };
   675 
   676 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
   677 #endif /* NO_MALLINFO */
   678 
   679 #ifdef __cplusplus
   680 extern "C" {
   681 #endif /* __cplusplus */
   682 
   683 #if !ONLY_MSPACES
   684 
   685 /* ------------------- Declarations of public routines ------------------- */
   686 
   687 #ifndef USE_DL_PREFIX
   688 #define dlcalloc               calloc
   689 #define dlfree                 free
   690 #define dlmalloc               malloc
   691 #define dlmemalign             memalign
   692 #define dlrealloc              realloc
   693 #define dlvalloc               valloc
   694 #define dlpvalloc              pvalloc
   695 #define dlmallinfo             mallinfo
   696 #define dlmallopt              mallopt
   697 #define dlmalloc_trim          malloc_trim
   698 #define dlmalloc_stats         malloc_stats
   699 #define dlmalloc_usable_size   malloc_usable_size
   700 #define dlmalloc_footprint     malloc_footprint
   701 #define dlmalloc_max_footprint malloc_max_footprint
   702 #define dlindependent_calloc   independent_calloc
   703 #define dlindependent_comalloc independent_comalloc
   704 #endif /* USE_DL_PREFIX */
   705 
   706 
   707 /*
   708   malloc(size_t n)
   709   Returns a pointer to a newly allocated chunk of at least n bytes, or
   710   null if no space is available, in which case errno is set to ENOMEM
   711   on ANSI C systems.
   712 
   713   If n is zero, malloc returns a minimum-sized chunk. (The minimum
   714   size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
   715   systems.)  Note that size_t is an unsigned type, so calls with
   716   arguments that would be negative if signed are interpreted as
   717   requests for huge amounts of space, which will often fail. The
   718   maximum supported value of n differs across systems, but is in all
   719   cases less than the maximum representable value of a size_t.
   720 */
   721 void* dlmalloc(size_t);
   722 
   723 /*
   724   free(void* p)
   725   Releases the chunk of memory pointed to by p, that had been previously
   726   allocated using malloc or a related routine such as realloc.
   727   It has no effect if p is null. If p was not malloced or already
   728   freed, free(p) will by default cause the current program to abort.
   729 */
   730 void  dlfree(void*);
   731 
   732 /*
   733   calloc(size_t n_elements, size_t element_size);
   734   Returns a pointer to n_elements * element_size bytes, with all locations
   735   set to zero.
   736 */
   737 void* dlcalloc(size_t, size_t);
   738 
   739 /*
   740   realloc(void* p, size_t n)
   741   Returns a pointer to a chunk of size n that contains the same data
   742   as does chunk p up to the minimum of (n, p's size) bytes, or null
   743   if no space is available.
   744 
   745   The returned pointer may or may not be the same as p. The algorithm
   746   prefers extending p in most cases when possible, otherwise it
   747   employs the equivalent of a malloc-copy-free sequence.
   748 
   749   If p is null, realloc is equivalent to malloc.
   750 
   751   If space is not available, realloc returns null, errno is set (if on
   752   ANSI) and p is NOT freed.
   753 
    754   If n is for fewer bytes than already held by p, the newly unused
   755   space is lopped off and freed if possible.  realloc with a size
   756   argument of zero (re)allocates a minimum-sized chunk.
   757 
   758   The old unix realloc convention of allowing the last-free'd chunk
   759   to be used as an argument to realloc is not supported.
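
  A common safe usage pattern is therefore (a sketch; handle_oom is a
  hypothetical out-of-memory handler):

    void* q = dlrealloc(p, newsize);
    if (q != 0)
      p = q;              // success: adopt the (possibly moved) chunk
    else
      handle_oom();       // failure: p is untouched and still valid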
   760 */
   761 
   762 void* dlrealloc(void*, size_t);
   763 
   764 /*
   765   memalign(size_t alignment, size_t n);
   766   Returns a pointer to a newly allocated chunk of n bytes, aligned
   767   in accord with the alignment argument.
   768 
   769   The alignment argument should be a power of two. If the argument is
   770   not a power of two, the nearest greater power is used.
   771   8-byte alignment is guaranteed by normal malloc calls, so don't
   772   bother calling memalign with an argument of 8 or less.
   773 
   774   Overreliance on memalign is a sure way to fragment space.
   775 */
   776 void* dlmemalign(size_t, size_t);
   777 
   778 /*
   779   valloc(size_t n);
   780   Equivalent to memalign(pagesize, n), where pagesize is the page
   781   size of the system. If the pagesize is unknown, 4096 is used.
   782 */
   783 void* dlvalloc(size_t);
   784 
   785 /*
   786   mallopt(int parameter_number, int parameter_value)
    787   Sets tunable parameters.  The format is to provide a
   788   (parameter-number, parameter-value) pair.  mallopt then sets the
   789   corresponding parameter to the argument value if it can (i.e., so
   790   long as the value is meaningful), and returns 1 if successful else
   791   0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
    792   normally defined in malloc.h.  None of these are used in this malloc,
   793   so setting them has no effect. But this malloc also supports other
   794   options in mallopt. See below for details.  Briefly, supported
   795   parameters are as follows (listed defaults are for "typical"
   796   configurations).
   797 
   798   Symbol            param #  default    allowed param values
   799   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
   800   M_GRANULARITY        -2     page size   any power of 2 >= page size
   801   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
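
  For example (a sketch; the dl prefix applies only when USE_DL_PREFIX is
  defined, otherwise call mallopt directly):

    dlmallopt(M_TRIM_THRESHOLD, 128*1024);   // trim once 128K sits unused on top
    dlmallopt(M_MMAP_THRESHOLD, 1024*1024);  // mmap only requests of 1MB or more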
   802 */
   803 int dlmallopt(int, int);
   804 
   805 /*
   806   malloc_footprint();
   807   Returns the number of bytes obtained from the system.  The total
   808   number of bytes allocated by malloc, realloc etc., is less than this
   809   value. Unlike mallinfo, this function returns only a precomputed
   810   result, so can be called frequently to monitor memory consumption.
   811   Even if locks are otherwise defined, this function does not use them,
   812   so results might not be up to date.
   813 */
   814 size_t dlmalloc_footprint(void);
   815 
   816 /*
   817   malloc_max_footprint();
   818   Returns the maximum number of bytes obtained from the system. This
   819   value will be greater than current footprint if deallocated space
   820   has been reclaimed by the system. The peak number of bytes allocated
   821   by malloc, realloc etc., is less than this value. Unlike mallinfo,
   822   this function returns only a precomputed result, so can be called
   823   frequently to monitor memory consumption.  Even if locks are
   824   otherwise defined, this function does not use them, so results might
   825   not be up to date.
   826 */
   827 size_t dlmalloc_max_footprint(void);
   828 
   829 #if !NO_MALLINFO
   830 /*
   831   mallinfo()
   832   Returns (by copy) a struct containing various summary statistics:
   833 
   834   arena:     current total non-mmapped bytes allocated from system
   835   ordblks:   the number of free chunks
   836   smblks:    always zero.
   837   hblks:     current number of mmapped regions
   838   hblkhd:    total bytes held in mmapped regions
   839   usmblks:   the maximum total allocated space. This will be greater
   840                 than current total if trimming has occurred.
   841   fsmblks:   always zero
   842   uordblks:  current total allocated space (normal or mmapped)
   843   fordblks:  total free space
   844   keepcost:  the maximum number of bytes that could ideally be released
   845                back to system via malloc_trim. ("ideally" means that
   846                it ignores page restrictions etc.)
   847 
   848   Because these fields are ints, but internal bookkeeping may
   849   be kept as longs, the reported values may wrap around zero and
   850   thus be inaccurate.
   851 */
   852 struct mallinfo dlmallinfo(void);
   853 #endif /* NO_MALLINFO */
   854 
   855 /*
   856   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
   857 
   858   independent_calloc is similar to calloc, but instead of returning a
   859   single cleared space, it returns an array of pointers to n_elements
   860   independent elements that can hold contents of size elem_size, each
   861   of which starts out cleared, and can be independently freed,
   862   realloc'ed etc. The elements are guaranteed to be adjacently
   863   allocated (this is not guaranteed to occur with multiple callocs or
   864   mallocs), which may also improve cache locality in some
   865   applications.
   866 
   867   The "chunks" argument is optional (i.e., may be null, which is
   868   probably the most typical usage). If it is null, the returned array
   869   is itself dynamically allocated and should also be freed when it is
   870   no longer needed. Otherwise, the chunks array must be of at least
   871   n_elements in length. It is filled in with the pointers to the
   872   chunks.
   873 
   874   In either case, independent_calloc returns this pointer array, or
   875   null if the allocation failed.  If n_elements is zero and "chunks"
   876   is null, it returns a chunk representing an array with zero elements
   877   (which should be freed if not wanted).
   878 
   879   Each element must be individually freed when it is no longer
   880   needed. If you'd like to instead be able to free all at once, you
   881   should instead use regular calloc and assign pointers into this
   882   space to represent elements.  (In this case though, you cannot
   883   independently free elements.)
   884 
   885   independent_calloc simplifies and speeds up implementations of many
   886   kinds of pools.  It may also be useful when constructing large data
   887   structures that initially have a fixed number of fixed-sized nodes,
   888   but the number is not known at compile time, and some of the nodes
   889   may later need to be freed. For example:
   890 
   891   struct Node { int item; struct Node* next; };
   892 
   893   struct Node* build_list() {
   894     struct Node** pool;
    895     int i, n = read_number_of_nodes_needed();
    896     if (n <= 0) return 0;
    897     pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
   898     if (pool == 0) die();
   899     // organize into a linked list...
   900     struct Node* first = pool[0];
   901     for (i = 0; i < n-1; ++i)
   902       pool[i]->next = pool[i+1];
   903     free(pool);     // Can now free the array (or not, if it is needed later)
   904     return first;
   905   }
   906 */
   907 void** dlindependent_calloc(size_t, size_t, void**);
   908 
   909 /*
   910   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   911 
   912   independent_comalloc allocates, all at once, a set of n_elements
   913   chunks with sizes indicated in the "sizes" array.    It returns
   914   an array of pointers to these elements, each of which can be
   915   independently freed, realloc'ed etc. The elements are guaranteed to
   916   be adjacently allocated (this is not guaranteed to occur with
   917   multiple callocs or mallocs), which may also improve cache locality
   918   in some applications.
   919 
   920   The "chunks" argument is optional (i.e., may be null). If it is null
   921   the returned array is itself dynamically allocated and should also
   922   be freed when it is no longer needed. Otherwise, the chunks array
   923   must be of at least n_elements in length. It is filled in with the
   924   pointers to the chunks.
   925 
   926   In either case, independent_comalloc returns this pointer array, or
   927   null if the allocation failed.  If n_elements is zero and chunks is
   928   null, it returns a chunk representing an array with zero elements
   929   (which should be freed if not wanted).
   930 
   931   Each element must be individually freed when it is no longer
   932   needed. If you'd like to instead be able to free all at once, you
   933   should instead use a single regular malloc, and assign pointers at
   934   particular offsets in the aggregate space. (In this case though, you
   935   cannot independently free elements.)
   936 
    937   independent_comalloc differs from independent_calloc in that each
   938   element may have a different size, and also that it does not
   939   automatically clear elements.
   940 
   941   independent_comalloc can be used to speed up allocation in cases
   942   where several structs or objects must always be allocated at the
   943   same time.  For example:
   944 
    945   struct Head { ... };
    946   struct Foot { ... };
   947 
   948   void send_message(char* msg) {
   949     int msglen = strlen(msg);
   950     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
   951     void* chunks[3];
   952     if (independent_comalloc(3, sizes, chunks) == 0)
   953       die();
   954     struct Head* head = (struct Head*)(chunks[0]);
   955     char*        body = (char*)(chunks[1]);
   956     struct Foot* foot = (struct Foot*)(chunks[2]);
   957     // ...
   958   }
   959 
   960   In general though, independent_comalloc is worth using only for
   961   larger values of n_elements. For small values, you probably won't
   962   detect enough difference from series of malloc calls to bother.
   963 
   964   Overuse of independent_comalloc can increase overall memory usage,
   965   since it cannot reuse existing noncontiguous small chunks that
   966   might be available for some of the elements.
   967 */
   968 void** dlindependent_comalloc(size_t, size_t*, void**);
   969 
   970 
   971 /*
   972   pvalloc(size_t n);
   973   Equivalent to valloc(minimum-page-that-holds(n)), that is,
   974   round up n to nearest pagesize.
   975  */
   976 void*  dlpvalloc(size_t);
   977 
   978 /*
   979   malloc_trim(size_t pad);
   980 
   981   If possible, gives memory back to the system (via negative arguments
   982   to sbrk) if there is unused memory at the `high' end of the malloc
   983   pool or in unused MMAP segments. You can call this after freeing
   984   large blocks of memory to potentially reduce the system-level memory
   985   requirements of a program. However, it cannot guarantee to reduce
   986   memory. Under some allocation patterns, some large free blocks of
   987   memory will be locked between two used chunks, so they cannot be
   988   given back to the system.
   989 
   990   The `pad' argument to malloc_trim represents the amount of free
   991   trailing space to leave untrimmed. If this argument is zero, only
   992   the minimum amount of memory to maintain internal data structures
   993   will be left. Non-zero arguments can be supplied to maintain enough
   994   trailing space to service future expected allocations without having
   995   to re-obtain memory from the system.
   996 
   997   Malloc_trim returns 1 if it actually released any memory, else 0.
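
  For example (a sketch using the dl-prefixed names declared below;
  big_buffer is a hypothetical earlier allocation):

    dlfree(big_buffer);           // give back a large allocation
    dlmalloc_trim(64*1024);       // return unused top space to the system,
                                  // keeping about 64K of slack for reuse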
   998 */
   999 int  dlmalloc_trim(size_t);
  1000 
  1001 /*
  1002   malloc_usable_size(void* p);
  1003 
  1004   Returns the number of bytes you can actually use in
  1005   an allocated chunk, which may be more than you requested (although
  1006   often not) due to alignment and minimum size constraints.
  1007   You can use this many bytes without worrying about
  1008   overwriting other allocated objects. This is not a particularly great
  1009   programming practice. malloc_usable_size can be more useful in
  1010   debugging and assertions, for example:
  1011 
  1012   p = malloc(n);
  1013   assert(malloc_usable_size(p) >= 256);
  1014 */
  1015 size_t dlmalloc_usable_size(void*);
  1016 
  1017 /*
  1018   malloc_stats();
  1019   Prints on stderr the amount of space obtained from the system (both
  1020   via sbrk and mmap), the maximum amount (which may be more than
  1021   current if malloc_trim and/or munmap got called), and the current
  1022   number of bytes allocated via malloc (or realloc, etc) but not yet
  1023   freed. Note that this is the number of bytes allocated, not the
  1024   number requested. It will be larger than the number requested
  1025   because of alignment and bookkeeping overhead. Because it includes
  1026   alignment wastage as being in use, this figure may be greater than
  1027   zero even when no user-level chunks are allocated.
  1028 
  1029   The reported current and maximum system memory can be inaccurate if
  1030   a program makes other calls to system memory allocation functions
  1031   (normally sbrk) outside of malloc.
  1032 
  1033   malloc_stats prints only the most commonly interesting statistics.
  1034   More information can be obtained by calling mallinfo.
  1035 */
  1036 void  dlmalloc_stats(void);
  1037 
  1038 #endif /* ONLY_MSPACES */
  1039 
  1040 #if MSPACES
  1041 
  1042 /*
  1043   mspace is an opaque type representing an independent
  1044   region of space that supports mspace_malloc, etc.
  1045 */
  1046 typedef void* mspace;
  1047 
  1048 /*
  1049   create_mspace creates and returns a new independent space with the
  1050   given initial capacity, or, if 0, the default granularity size.  It
  1051   returns null if there is no system memory available to create the
  1052   space.  If argument locked is non-zero, the space uses a separate
  1053   lock to control access. The capacity of the space will grow
  1054   dynamically as needed to service mspace_malloc requests.  You can
  1055   control the sizes of incremental increases of this space by
  1056   compiling with a different DEFAULT_GRANULARITY or dynamically
  1057   setting with mallopt(M_GRANULARITY, value).
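
  For example (a minimal sketch of creating, using and destroying a space):

    mspace ms = create_mspace(0, 0);    // default capacity, no locking
    void* p = mspace_malloc(ms, 128);
    mspace_free(ms, p);
    destroy_mspace(ms);                 // returns all of its memory at once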
  1058 */
  1059 mspace create_mspace(size_t capacity, int locked);
  1060 
  1061 /*
  1062   destroy_mspace destroys the given space, and attempts to return all
  1063   of its memory back to the system, returning the total number of
  1064   bytes freed. After destruction, the results of access to all memory
  1065   used by the space become undefined.
  1066 */
  1067 size_t destroy_mspace(mspace msp);
  1068 
  1069 /*
  1070   create_mspace_with_base uses the memory supplied as the initial base
  1071   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  1072   space is used for bookkeeping, so the capacity must be at least this
  1073   large. (Otherwise 0 is returned.) When this initial space is
  1074   exhausted, additional memory will be obtained from the system.
  1075   Destroying this space will deallocate all additionally allocated
  1076   space (if possible) but not the initial base.
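
  For example (a sketch; the buffer and its size are illustrative only):

    static char arena[1 << 20];                   // 1MB initial region
    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
    void* p = mspace_malloc(ms, 4096);            // carved from arena first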
  1077 */
  1078 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
  1079 
  1080 /*
  1081   mspace_malloc behaves as malloc, but operates within
  1082   the given space.
  1083 */
  1084 void* mspace_malloc(mspace msp, size_t bytes);
  1085 
  1086 /*
  1087   mspace_free behaves as free, but operates within
  1088   the given space.
  1089 
  1090   If compiled with FOOTERS==1, mspace_free is not actually needed.
  1091   free may be called instead of mspace_free because freed chunks from
  1092   any space are handled by their originating spaces.
  1093 */
  1094 void mspace_free(mspace msp, void* mem);
  1095 
  1096 /*
  1097   mspace_realloc behaves as realloc, but operates within
  1098   the given space.
  1099 
  1100   If compiled with FOOTERS==1, mspace_realloc is not actually
  1101   needed.  realloc may be called instead of mspace_realloc because
  1102   realloced chunks from any space are handled by their originating
  1103   spaces.
  1104 */
  1105 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
  1106 
  1107 /*
  1108   mspace_calloc behaves as calloc, but operates within
  1109   the given space.
  1110 */
  1111 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  1112 
  1113 /*
  1114   mspace_memalign behaves as memalign, but operates within
  1115   the given space.
  1116 */
  1117 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  1118 
  1119 /*
  1120   mspace_independent_calloc behaves as independent_calloc, but
  1121   operates within the given space.
  1122 */
  1123 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  1124                                  size_t elem_size, void* chunks[]);
  1125 
  1126 /*
  1127   mspace_independent_comalloc behaves as independent_comalloc, but
  1128   operates within the given space.
  1129 */
  1130 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  1131                                    size_t sizes[], void* chunks[]);
  1132 
  1133 /*
  1134   mspace_footprint() returns the number of bytes obtained from the
  1135   system for this space.
  1136 */
  1137 size_t mspace_footprint(mspace msp);
  1138 
  1139 /*
  1140   mspace_max_footprint() returns the peak number of bytes obtained from the
  1141   system for this space.
  1142 */
  1143 size_t mspace_max_footprint(mspace msp);
  1144 
  1145 
  1146 #if !NO_MALLINFO
  1147 /*
  1148   mspace_mallinfo behaves as mallinfo, but reports properties of
  1149   the given space.
  1150 */
  1151 struct mallinfo mspace_mallinfo(mspace msp);
  1152 #endif /* NO_MALLINFO */
  1153 
  1154 /*
  1155   mspace_malloc_stats behaves as malloc_stats, but reports
  1156   properties of the given space.
  1157 */
  1158 void mspace_malloc_stats(mspace msp);
  1159 
  1160 /*
  1161   mspace_trim behaves as malloc_trim, but
  1162   operates within the given space.
  1163 */
  1164 int mspace_trim(mspace msp, size_t pad);
  1165 
  1166 /*
  1167   An alias for mallopt.
  1168 */
  1169 int mspace_mallopt(int, int);
  1170 
  1171 #endif /* MSPACES */
  1172 
  1173 #ifdef __cplusplus
  1174 };  /* end of extern "C" */
  1175 #endif /* __cplusplus */
  1176 
  1177 /*
  1178   ========================================================================
  1179   To make a fully customizable malloc.h header file, cut everything
  1180   above this line, put into file malloc.h, edit to suit, and #include it
  1181   on the next line, as well as in programs that use this malloc.
  1182   ========================================================================
  1183 */
  1184 
  1185 /* #include "malloc.h" */
  1186 
  1187 /*------------------------------ internal #includes ---------------------- */
  1188 
  1189 #ifdef _MSC_VER
  1190 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
  1191 #endif /* _MSC_VER */
  1192 
  1193 #ifndef LACKS_STDIO_H
  1194 #include <stdio.h>       /* for printing in malloc_stats */
  1195 #endif
  1196 
  1197 #ifndef LACKS_ERRNO_H
  1198 #include <errno.h>       /* for MALLOC_FAILURE_ACTION */
  1199 #endif /* LACKS_ERRNO_H */
  1200 #if FOOTERS
  1201 #include <time.h>        /* for magic initialization */
  1202 #endif /* FOOTERS */
  1203 #ifndef LACKS_STDLIB_H
  1204 #include <stdlib.h>      /* for abort() */
  1205 #endif /* LACKS_STDLIB_H */
  1206 #ifdef DEBUG
  1207 #if ABORT_ON_ASSERT_FAILURE
  1208 #define assert(x) if(!(x)) ABORT
  1209 #else /* ABORT_ON_ASSERT_FAILURE */
  1210 #include <assert.h>
  1211 #endif /* ABORT_ON_ASSERT_FAILURE */
  1212 #else  /* DEBUG */
  1213 #define assert(x)
  1214 #endif /* DEBUG */
  1215 #ifndef LACKS_STRING_H
  1216 #include <string.h>      /* for memset etc */
  1217 #endif  /* LACKS_STRING_H */
  1218 #if USE_BUILTIN_FFS
  1219 #ifndef LACKS_STRINGS_H
  1220 #include <strings.h>     /* for ffs */
  1221 #endif /* LACKS_STRINGS_H */
  1222 #endif /* USE_BUILTIN_FFS */
  1223 #if HAVE_MMAP
  1224 #ifndef LACKS_SYS_MMAN_H
  1225 #include <sys/mman.h>    /* for mmap */
  1226 #endif /* LACKS_SYS_MMAN_H */
  1227 #ifndef LACKS_FCNTL_H
  1228 #include <fcntl.h>
  1229 #endif /* LACKS_FCNTL_H */
  1230 #endif /* HAVE_MMAP */
  1231 #if HAVE_MORECORE
  1232 #ifndef LACKS_UNISTD_H
  1233 #include <unistd.h>     /* for sbrk */
  1234 #else /* LACKS_UNISTD_H */
  1235 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  1236 extern void*     sbrk(ptrdiff_t);
  1237 #endif /* FreeBSD etc */
  1238 #endif /* LACKS_UNISTD_H */
  1239 #endif /* HAVE_MORECORE */
  1240 
  1241 #ifndef WIN32
  1242 #ifndef malloc_getpagesize
  1243 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
  1244 #    ifndef _SC_PAGE_SIZE
  1245 #      define _SC_PAGE_SIZE _SC_PAGESIZE
  1246 #    endif
  1247 #  endif
  1248 #  ifdef _SC_PAGE_SIZE
  1249 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  1250 #  else
  1251 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  1252        extern size_t getpagesize();
  1253 #      define malloc_getpagesize getpagesize()
  1254 #    else
  1255 #      ifdef WIN32 /* use supplied emulation of getpagesize */
  1256 #        define malloc_getpagesize getpagesize()
  1257 #      else
  1258 #        ifndef LACKS_SYS_PARAM_H
  1259 #          include <sys/param.h>
  1260 #        endif
  1261 #        ifdef EXEC_PAGESIZE
  1262 #          define malloc_getpagesize EXEC_PAGESIZE
  1263 #        else
  1264 #          ifdef NBPG
  1265 #            ifndef CLSIZE
  1266 #              define malloc_getpagesize NBPG
  1267 #            else
  1268 #              define malloc_getpagesize (NBPG * CLSIZE)
  1269 #            endif
  1270 #          else
  1271 #            ifdef NBPC
  1272 #              define malloc_getpagesize NBPC
  1273 #            else
  1274 #              ifdef PAGESIZE
  1275 #                define malloc_getpagesize PAGESIZE
  1276 #              else /* just guess */
  1277 #                define malloc_getpagesize ((size_t)4096U)
  1278 #              endif
  1279 #            endif
  1280 #          endif
  1281 #        endif
  1282 #      endif
  1283 #    endif
  1284 #  endif
  1285 #endif
  1286 #endif
  1287 
  1288 /* ------------------- size_t and alignment properties -------------------- */
  1289 
  1290 /* The byte and bit size of a size_t */
  1291 #define SIZE_T_SIZE         (sizeof(size_t))
  1292 #define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
  1293 
  1294 /* Some constants coerced to size_t */
  1295 /* Annoying but necessary to avoid errors on some platforms */
  1296 #define SIZE_T_ZERO         ((size_t)0)
  1297 #define SIZE_T_ONE          ((size_t)1)
  1298 #define SIZE_T_TWO          ((size_t)2)
  1299 #define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
  1300 #define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
  1301 #define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
  1302 #define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
  1303 
  1304 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
  1305 #define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
  1306 
  1307 /* True if address a has acceptable alignment */
  1308 #define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
  1309 
  1310 /* the number of bytes to offset an address to align it */
  1311 #define align_offset(A)\
  1312  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  1313   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
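       /*
         Illustration (assuming MALLOC_ALIGNMENT is 8, so CHUNK_ALIGN_MASK is 7):

           align_offset((void*)0x1001) == 7
           align_offset((void*)0x1008) == 0
           is_aligned((void*)0x1008)   != 0
       */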
  1314 
  1315 /* -------------------------- MMAP preliminaries ------------------------- */
  1316 
  1317 /*
  1318    If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
  1319    checks to fail so the compiler optimizer can delete code rather than
  1320    using so many "#if"s.
  1321 */
  1322 
  1323 
  1324 /* MORECORE and MMAP must return MFAIL on failure */
  1325 #define MFAIL                ((void*)(MAX_SIZE_T))
  1326 #define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
  1327 
  1328 #if !HAVE_MMAP
  1329 #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
  1330 #define USE_MMAP_BIT         (SIZE_T_ZERO)
  1331 #define CALL_MMAP(s)         MFAIL
  1332 #define CALL_MUNMAP(a, s)    (-1)
  1333 #define DIRECT_MMAP(s)       MFAIL
  1334 
  1335 #else /* HAVE_MMAP */
  1336 #define IS_MMAPPED_BIT       (SIZE_T_ONE)
  1337 #define USE_MMAP_BIT         (SIZE_T_ONE)
  1338 
  1339 #ifndef WIN32
  1340 #define CALL_MUNMAP(a, s)    munmap((a), (s))
  1341 #define MMAP_PROT            (PROT_READ|PROT_WRITE)
  1342 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  1343 #define MAP_ANONYMOUS        MAP_ANON
  1344 #endif /* MAP_ANON */
  1345 #ifdef MAP_ANONYMOUS
  1346 #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
  1347 #define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
  1348 #else /* MAP_ANONYMOUS */
  1349 /*
  1350    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  1351    is unlikely to be needed, but is supplied just in case.
  1352 */
  1353 #define MMAP_FLAGS           (MAP_PRIVATE)
  1354 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  1355 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  1356            (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1357             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  1358             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
  1359 #endif /* MAP_ANONYMOUS */
  1360 
  1361 #define DIRECT_MMAP(s)       CALL_MMAP(s)
  1362 #else /* WIN32 */
  1363 
  1364 /* Win32 MMAP via VirtualAlloc */
  1365 static void* win32mmap(size_t size) {
  1366   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  1367   return (ptr != 0)? ptr: MFAIL;
  1368 }
  1369 
  1370 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
  1371 static void* win32direct_mmap(size_t size) {
  1372   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
  1373                            PAGE_READWRITE);
  1374   return (ptr != 0)? ptr: MFAIL;
  1375 }
  1376 
  1377 /* This function supports releasing coalesced segments */
  1378 static int win32munmap(void* ptr, size_t size) {
  1379   MEMORY_BASIC_INFORMATION minfo;
  1380   char* cptr = ptr;
  1381   while (size) {
  1382     if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
  1383       return -1;
  1384     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
  1385         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
  1386       return -1;
  1387     if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
  1388       return -1;
  1389     cptr += minfo.RegionSize;
  1390     size -= minfo.RegionSize;
  1391   }
  1392   return 0;
  1393 }
  1394 
  1395 #define CALL_MMAP(s)         win32mmap(s)
  1396 #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
  1397 #define DIRECT_MMAP(s)       win32direct_mmap(s)
  1398 #endif /* WIN32 */
  1399 #endif /* HAVE_MMAP */
  1400 
  1401 #if HAVE_MMAP && HAVE_MREMAP
  1402 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
  1403 #else  /* HAVE_MMAP && HAVE_MREMAP */
  1404 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
  1405 #endif /* HAVE_MMAP && HAVE_MREMAP */
  1406 
  1407 #if HAVE_MORECORE
  1408 #define CALL_MORECORE(S)     MORECORE(S)
  1409 #else  /* HAVE_MORECORE */
  1410 #define CALL_MORECORE(S)     MFAIL
  1411 #endif /* HAVE_MORECORE */
  1412 
  1413 /* mstate bit set if contiguous morecore disabled or failed */
  1414 #define USE_NONCONTIGUOUS_BIT (4U)
  1415 
  1416 /* segment bit set in create_mspace_with_base */
  1417 #define EXTERN_BIT            (8U)
  1418 
  1419 
  1420 /* --------------------------- Lock preliminaries ------------------------ */
  1421 
  1422 #if USE_LOCKS
  1423 
  1424 /*
  1425   When locks are defined, there are up to two global locks:
  1426 
  1427   * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
  1428     MORECORE.  In many cases sys_alloc requires two calls that should
  1429     not be interleaved with calls by other threads.  This does not
  1430     protect against direct calls to MORECORE by other threads not
  1431     using this lock, so there is still code to cope as best we can with
  1432     interference.
  1433 
  1434   * magic_init_mutex ensures that mparams.magic and other
  1435     unique mparams values are initialized only once.
  1436 */
  1437 
  1438 #ifndef WIN32
  1439 /* By default use posix locks */
  1440 #include <pthread.h>
  1441 #define MLOCK_T pthread_mutex_t
  1442 #define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
  1443 #define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
  1444 #define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
  1445 
  1446 #if HAVE_MORECORE
  1447 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
  1448 #endif /* HAVE_MORECORE */
  1449 
  1450 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
  1451 
  1452 #else /* WIN32 */
  1453 /*
  1454    Because lock-protected regions have bounded times, and there
  1455    are no recursive lock calls, we can use simple spinlocks.
  1456 */
  1457 
  1458 #define MLOCK_T long
  1459 static int win32_acquire_lock (MLOCK_T *sl) {
  1460   for (;;) {
  1461 #ifdef InterlockedCompareExchangePointer
  1462     if (!InterlockedCompareExchange(sl, 1, 0))
  1463       return 0;
  1464 #else  /* Use older void* version */
  1465     if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
  1466       return 0;
  1467 #endif /* InterlockedCompareExchangePointer */
  1468     Sleep (0);
  1469   }
  1470 }
  1471 
  1472 static void win32_release_lock (MLOCK_T *sl) {
  1473   InterlockedExchange (sl, 0);
  1474 }
  1475 
  1476 #define INITIAL_LOCK(l)      *(l)=0
  1477 #define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
  1478 #define RELEASE_LOCK(l)      win32_release_lock(l)
  1479 #if HAVE_MORECORE
  1480 static MLOCK_T morecore_mutex;
  1481 #endif /* HAVE_MORECORE */
  1482 static MLOCK_T magic_init_mutex;
  1483 #endif /* WIN32 */
  1484 
  1485 #define USE_LOCK_BIT               (2U)
  1486 #else  /* USE_LOCKS */
  1487 #define USE_LOCK_BIT               (0U)
  1488 #define INITIAL_LOCK(l)
  1489 #endif /* USE_LOCKS */
  1490 
  1491 #if USE_LOCKS && HAVE_MORECORE
  1492 #define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
  1493 #define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
  1494 #else /* USE_LOCKS && HAVE_MORECORE */
  1495 #define ACQUIRE_MORECORE_LOCK()
  1496 #define RELEASE_MORECORE_LOCK()
  1497 #endif /* USE_LOCKS && HAVE_MORECORE */
  1498 
  1499 #if USE_LOCKS
  1500 #define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
  1501 #define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
  1502 #else  /* USE_LOCKS */
  1503 #define ACQUIRE_MAGIC_INIT_LOCK()
  1504 #define RELEASE_MAGIC_INIT_LOCK()
  1505 #endif /* USE_LOCKS */
  1506 
  1507 
  1508 /* -----------------------  Chunk representations ------------------------ */
  1509 
  1510 /*
  1511   (The following includes lightly edited explanations by Colin Plumb.)
  1512 
  1513   The malloc_chunk declaration below is misleading (but accurate and
  1514   necessary).  It declares a "view" into memory allowing access to
  1515   necessary fields at known offsets from a given base.
  1516 
  1517   Chunks of memory are maintained using a `boundary tag' method as
  1518   originally described by Knuth.  (See the paper by Paul Wilson
  1519   ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
  1520   techniques.)  Sizes of free chunks are stored both in the front of
  1521   each chunk and at the end.  This makes consolidating fragmented
  1522   chunks into bigger chunks fast.  The head fields also hold bits
  1523   representing whether chunks are free or in use.
  1524 
  1525   Here are some pictures to make it clearer.  They are "exploded" to
  1526   show that the state of a chunk can be thought of as extending from
  1527   the high 31 bits of the head field of its header through the
  1528   prev_foot and PINUSE_BIT bit of the following chunk header.
  1529 
  1530   A chunk that's in use looks like:
  1531 
  1532    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1533            | Size of previous chunk (if P = 1)                             |
  1534            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1535          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1536          | Size of this chunk                                         1| +-+
  1537    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1538          |                                                               |
  1539          +-                                                             -+
  1540          |                                                               |
  1541          +-                                                             -+
  1542          |                                                               :
  1543          +-      size - sizeof(size_t) available payload bytes          -+
  1544          :                                                               |
  1545  chunk-> +-                                                             -+
  1546          |                                                               |
  1547          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1548        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
  1549        | Size of next chunk (may or may not be in use)               | +-+
  1550  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1551 
  1552     And if it's free, it looks like this:
  1553 
  1554    chunk-> +-                                                             -+
  1555            | User payload (must be in use, or we would have merged!)       |
  1556            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1557          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1558          | Size of this chunk                                         0| +-+
  1559    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1560          | Next pointer                                                  |
  1561          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1562          | Prev pointer                                                  |
  1563          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1564          |                                                               :
  1565          +-      size - sizeof(struct chunk) unused bytes               -+
  1566          :                                                               |
  1567  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1568          | Size of this chunk                                            |
  1569          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1570        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
  1571        | Size of next chunk (must be in use, or we would have merged)| +-+
  1572  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1573        |                                                               :
  1574        +- User payload                                                -+
  1575        :                                                               |
  1576        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1577                                                                      |0|
  1578                                                                      +-+
  1579   Note that since we always merge adjacent free chunks, the chunks
  1580   adjacent to a free chunk must be in use.
  1581 
  1582   Given a pointer to a chunk (which can be derived trivially from the
  1583   payload pointer) we can, in O(1) time, find out whether the adjacent
  1584   chunks are free, and if so, unlink them from the lists that they
  1585   are on and merge them with the current chunk.
  1586 
  1587   Chunks always begin on even word boundaries, so the mem portion
  1588   (which is returned to the user) is also on an even word boundary, and
  1589   thus at least double-word aligned.
  1590 
  1591   The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
  1592   chunk size (which is always a multiple of two words), is an in-use
  1593   bit for the *previous* chunk.  If that bit is *clear*, then the
  1594   word before the current chunk size contains the previous chunk
  1595   size, and can be used to find the front of the previous chunk.
  1596   The very first chunk allocated always has this bit set, preventing
  1597   access to non-existent (or non-owned) memory. If pinuse is set for
  1598   any given chunk, then you CANNOT determine the size of the
  1599   previous chunk, and might even get a memory addressing fault when
  1600   trying to do so.
  1601 
  1602   The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
  1603   the chunk size redundantly records whether the current chunk is
  1604   inuse. This redundancy enables usage checks within free and realloc,
  1605   and reduces indirection when freeing and consolidating chunks.
  1606 
  1607   Each freshly allocated chunk must have both cinuse and pinuse set.
  1608   That is, each allocated chunk borders either a previously allocated
  1609   and still in-use chunk, or the base of its memory arena. This is
  1610     ensured by making all allocations from the `lowest' part of any
  1611   found chunk.  Further, no free chunk physically borders another one,
  1612   so each free chunk is known to be preceded and followed by either
  1613   inuse chunks or the ends of memory.
  1614 
  1615   Note that the `foot' of the current chunk is actually represented
  1616   as the prev_foot of the NEXT chunk. This makes it easier to
  1617   deal with alignments etc but can be very confusing when trying
  1618   to extend or adapt this code.
  1619 
  1620   The exceptions to all this are
  1621 
  1622      1. The special chunk `top' is the top-most available chunk (i.e.,
  1623         the one bordering the end of available memory). It is treated
  1624         specially.  Top is never included in any bin, is used only if
  1625         no other chunk is available, and is released back to the
  1626         system if it is very large (see M_TRIM_THRESHOLD).  In effect,
  1627         the top chunk is treated as larger (and thus less well
  1628         fitting) than any other available chunk.  The top chunk
  1629         doesn't update its trailing size field since there is no next
  1630         contiguous chunk that would have to index off it. However,
  1631         space is still allocated for it (TOP_FOOT_SIZE) to enable
  1632         separation or merging when space is extended.
  1633 
  1634      2. Chunks allocated via mmap, which have the lowest-order bit
  1635         (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
  1636         PINUSE_BIT in their head fields.  Because they are allocated
  1637         one-by-one, each must carry its own prev_foot field, which is
  1638         also used to hold the offset this chunk has within its mmapped
  1639         region, which is needed to preserve alignment. Each mmapped
  1640         chunk is trailed by the first two fields of a fake next-chunk
  1641         for sake of usage checks.
  1642 
  1643 */
  1644 
  1645 struct malloc_chunk {
  1646   size_t               prev_foot;  /* Size of previous chunk (if free).  */
  1647   size_t               head;       /* Size and inuse bits. */
  1648   struct malloc_chunk* fd;         /* double links -- used only if free. */
  1649   struct malloc_chunk* bk;
  1650 };
  1651 
  1652 typedef struct malloc_chunk  mchunk;
  1653 typedef struct malloc_chunk* mchunkptr;
  1654 typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
  1655 typedef unsigned int bindex_t;         /* Described below */
  1656 typedef unsigned int binmap_t;         /* Described below */
  1657 typedef unsigned int flag_t;           /* The type of various bit flag sets */
  1658 
  1659 /* ------------------- Chunks sizes and alignments ----------------------- */
  1660 
  1661 #define MCHUNK_SIZE         (sizeof(mchunk))
  1662 
  1663 #if FOOTERS
  1664 #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
  1665 #else /* FOOTERS */
  1666 #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
  1667 #endif /* FOOTERS */
  1668 
  1669 /* MMapped chunks need a second word of overhead ... */
  1670 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
  1671 /* ... and additional padding for fake next-chunk at foot */
  1672 #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
  1673 
  1674 /* The smallest size we can malloc is an aligned minimal chunk */
  1675 #define MIN_CHUNK_SIZE\
  1676   ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1677 
  1678 /* conversion from malloc headers to user pointers, and back */
  1679 #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
  1680 #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
  1681 /* chunk associated with aligned address A */
  1682 #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
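       /*
         For illustration, with 4-byte size_t's chunk2mem(p) is (char*)p + 8,
         and mem2chunk is its exact inverse: mem2chunk(chunk2mem(p)) == p.
       */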
  1683 
  1684 /* Bounds on request (not chunk) sizes. */
  1685 #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
  1686 #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
  1687 
  1688 /* pad request bytes into a usable size */
  1689 #define pad_request(req) \
  1690    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1691 
  1692 /* pad request, checking for minimum (but not maximum) */
  1693 #define request2size(req) \
  1694   (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
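       /*
         Worked example (assuming 4-byte size_t's, 8-byte alignment and
         !FOOTERS, so MIN_CHUNK_SIZE is 16, CHUNK_OVERHEAD is 4 and
         MIN_REQUEST is 11):

           request2size(1)  == 16     (below MIN_REQUEST; rounded up to minimum)
           request2size(13) == 24     ((13 + 4 + 7) & ~7)
       */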
  1695 
  1696 
  1697 /* ------------------ Operations on head and foot fields ----------------- */
  1698 
  1699 /*
  1700   The head field of a chunk is or'ed with PINUSE_BIT when the previous
  1701   adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
  1702   use. If the chunk was obtained with mmap, the prev_foot field has
  1703   IS_MMAPPED_BIT set, and also holds the offset of the chunk's base
  1704   from the base of the mmapped region.
  1705 */
  1706 
  1707 #define PINUSE_BIT          (SIZE_T_ONE)
  1708 #define CINUSE_BIT          (SIZE_T_TWO)
  1709 #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
  1710 
  1711 /* Head value for fenceposts */
  1712 #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
  1713 
  1714 /* extraction of fields from head words */
  1715 #define cinuse(p)           ((p)->head & CINUSE_BIT)
  1716 #define pinuse(p)           ((p)->head & PINUSE_BIT)
  1717 #define chunksize(p)        ((p)->head & ~(INUSE_BITS))
  1718 
  1719 #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
  1720 #define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
  1721 
  1722 /* Treat space at ptr +/- offset as a chunk */
  1723 #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
  1724 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
  1725 
  1726 /* Ptr to next or previous physical malloc_chunk. */
  1727 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
  1728 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
  1729 
  1730 /* extract next chunk's pinuse bit */
  1731 #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
  1732 
  1733 /* Get/set size at footer */
  1734 #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
  1735 #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
  1736 
  1737 /* Set size, pinuse bit, and foot */
  1738 #define set_size_and_pinuse_of_free_chunk(p, s)\
  1739   ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
  1740 
  1741 /* Set size, pinuse bit, foot, and clear next pinuse */
  1742 #define set_free_with_pinuse(p, s, n)\
  1743   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
  1744 
  1745 #define is_mmapped(p)\
  1746   (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
  1747 
  1748 /* Get the internal overhead associated with chunk p */
  1749 #define overhead_for(p)\
  1750  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
  1751 
  1752 /* Return true if malloced space is not necessarily cleared */
  1753 #if MMAP_CLEARS
  1754 #define calloc_must_clear(p) (!is_mmapped(p))
  1755 #else /* MMAP_CLEARS */
  1756 #define calloc_must_clear(p) (1)
  1757 #endif /* MMAP_CLEARS */
  1758 
  1759 /* ---------------------- Overlaid data structures ----------------------- */
  1760 
  1761 /*
  1762   When chunks are not in use, they are treated as nodes of either
  1763   lists or trees.
  1764 
  1765   "Small"  chunks are stored in circular doubly-linked lists, and look
  1766   like this:
  1767 
  1768     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1769             |             Size of previous chunk                            |
  1770             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1771     `head:' |             Size of chunk, in bytes                         |P|
  1772       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1773             |             Forward pointer to next chunk in list             |
  1774             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1775             |             Back pointer to previous chunk in list            |
  1776             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1777             |             Unused space (may be 0 bytes long)                .
  1778             .                                                               .
  1779             .                                                               |
  1780 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1781     `foot:' |             Size of chunk, in bytes                           |
  1782             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1783 
  1784   Larger chunks are kept in a form of bitwise digital trees (aka
  1785   tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
  1786   free chunks greater than 256 bytes, their size doesn't impose any
  1787   constraints on user chunk sizes.  Each node looks like:
  1788 
  1789     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1790             |             Size of previous chunk                            |
  1791             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1792     `head:' |             Size of chunk, in bytes                         |P|
  1793       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1794             |             Forward pointer to next chunk of same size        |
  1795             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1796             |             Back pointer to previous chunk of same size       |
  1797             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1798             |             Pointer to left child (child[0])                  |
  1799             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1800             |             Pointer to right child (child[1])                 |
  1801             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1802             |             Pointer to parent                                 |
  1803             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1804             |             bin index of this chunk                           |
  1805             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1806             |             Unused space                                      .
  1807             .                                                               |
  1808 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1809     `foot:' |             Size of chunk, in bytes                           |
  1810             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1811 
  1812   Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
  1813   of the same size are arranged in a circularly-linked list, with only
  1814   the oldest chunk (the next to be used, in our FIFO ordering)
  1815   actually in the tree.  (Tree members are distinguished by a non-null
  1816   parent pointer.)  If a chunk with the same size as an existing node
  1817   is inserted, it is linked off the existing node using pointers that
  1818   work in the same way as fd/bk pointers of small chunks.
  1819 
  1820   Each tree contains a power of 2 sized range of chunk sizes (the
  1821   smallest is 0x100 <= x < 0x180), which is divided in half at each
  1822   tree level, with the chunks in the smaller half of the range (0x100
  1823   <= x < 0x140 for the top node) in the left subtree and the larger
  1824   half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
  1825   done by inspecting individual bits.
  1826 
  1827   Using these rules, each node's left subtree contains all smaller
  1828   sizes than its right subtree.  However, the node at the root of each
  1829   subtree has no particular ordering relationship to either.  (The
  1830   dividing line between the subtree sizes is based on trie relation.)
  1831   If we remove the last chunk of a given size from the interior of the
  1832   tree, we need to replace it with a leaf node.  The tree ordering
  1833   rules permit a node to be replaced by any leaf below it.
  1834 
  1835   The smallest chunk in a tree (a common operation in a best-fit
  1836   allocator) can be found by walking a path to the leftmost leaf in
  1837   the tree.  Unlike a usual binary tree, where we follow left child
  1838   pointers until we reach a null, here we follow the right child
  1839   pointer any time the left one is null, until we reach a leaf with
  1840   both child pointers null. The smallest chunk in the tree will be
  1841   somewhere along that path.
  1842 
  1843   The worst case number of steps to add, find, or remove a node is
  1844   bounded by the number of bits differentiating chunks within
  1845   bins. Under current bin calculations, this ranges from 6 up to 21
  1846   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  1847   is of course much better.
  1848 */
  1849 
  1850 struct malloc_tree_chunk {
  1851   /* The first four fields must be compatible with malloc_chunk */
  1852   size_t                    prev_foot;
  1853   size_t                    head;
  1854   struct malloc_tree_chunk* fd;
  1855   struct malloc_tree_chunk* bk;
  1856 
  1857   struct malloc_tree_chunk* child[2];
  1858   struct malloc_tree_chunk* parent;
  1859   bindex_t                  index;
  1860 };
  1861 
  1862 typedef struct malloc_tree_chunk  tchunk;
  1863 typedef struct malloc_tree_chunk* tchunkptr;
  1864 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
  1865 
  1866 /* A little helper macro for trees */
  1867 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
  1868 
  1869 /* ----------------------------- Segments -------------------------------- */
  1870 
  1871 /*
  1872   Each malloc space may include non-contiguous segments, held in a
  1873   list headed by an embedded malloc_segment record representing the
  1874   top-most space. Segments also include flags holding properties of
  1875   the space. Large chunks that are directly allocated by mmap are not
  1876   included in this list. They are instead independently created and
  1877   destroyed without otherwise keeping track of them.
  1878 
  1879   Segment management mainly comes into play for spaces allocated by
  1880   MMAP.  Any call to MMAP might or might not return memory that is
  1881   adjacent to an existing segment.  MORECORE normally contiguously
  1882   extends the current space, so this space is almost always adjacent,
  1883   which is simpler and faster to deal with. (This is why MORECORE is
  1884   used preferentially to MMAP when both are available -- see
  1885   sys_alloc.)  When allocating using MMAP, we don't use any of the
  1886   hinting mechanisms (inconsistently) supported in various
  1887   implementations of unix mmap, or distinguish reserving from
  1888   committing memory. Instead, we just ask for space, and exploit
  1889   contiguity when we get it.  It is probably possible to do
  1890   better than this on some systems, but no general scheme seems
  1891   to be significantly better.
  1892 
  1893   Management entails a simpler variant of the consolidation scheme
  1894   used for chunks to reduce fragmentation -- new adjacent memory is
  1895   normally prepended or appended to an existing segment. However,
  1896   there are limitations compared to chunk consolidation that mostly
  1897   reflect the fact that segment processing is relatively infrequent
  1898   (occurring only when getting memory from system) and that we
  1899   don't expect to have huge numbers of segments:
  1900 
  1901   * Segments are not indexed, so traversal requires linear scans.  (It
  1902     would be possible to index these, but is not worth the extra
  1903     overhead and complexity for most programs on most platforms.)
  1904   * New segments are only appended to old ones when holding top-most
  1905     memory; if they cannot be prepended to others, they are held in
  1906     different segments.
  1907 
  1908   Except for the top-most segment of an mstate, each segment record
  1909   is kept at the tail of its segment. Segments are added by pushing
  1910   segment records onto the list headed by &mstate.seg for the
  1911   containing mstate.
  1912 
  1913   Segment flags control allocation/merge/deallocation policies:
  1914   * If EXTERN_BIT set, then we did not allocate this segment,
  1915     and so should not try to deallocate or merge with others.
  1916     (This currently holds only for the initial segment passed
  1917     into create_mspace_with_base.)
  1918   * If IS_MMAPPED_BIT set, the segment may be merged with
  1919     other surrounding mmapped segments and trimmed/de-allocated
  1920     using munmap.
  1921   * If neither bit is set, then the segment was obtained using
  1922     MORECORE so can be merged with surrounding MORECORE'd segments
  1923     and deallocated/trimmed using MORECORE with negative arguments.
  1924 */
  1925 
  1926 struct malloc_segment {
  1927   char*        base;             /* base address */
  1928   size_t       size;             /* allocated size */
  1929   struct malloc_segment* next;   /* ptr to next segment */
  1930   flag_t       sflags;           /* mmap and extern flag */
  1931 };
  1932 
  1933 #define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
  1934 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
  1935 
  1936 typedef struct malloc_segment  msegment;
  1937 typedef struct malloc_segment* msegmentptr;
  1938 
  1939 /* ---------------------------- malloc_state ----------------------------- */
  1940 
  1941 /*
  1942    A malloc_state holds all of the bookkeeping for a space.
  1943    The main fields are:
  1944 
  1945   Top
  1946     The topmost chunk of the currently active segment. Its size is
  1947     cached in topsize.  The actual size of topmost space is
  1948     topsize+TOP_FOOT_SIZE, which includes space reserved for adding
  1949     fenceposts and segment records if necessary when getting more
  1950     space from the system.  The size at which to autotrim top is
  1951     cached from mparams in trim_check, except that it is disabled if
  1952     an autotrim fails.
  1953 
  1954   Designated victim (dv)
  1955     This is the preferred chunk for servicing small requests that
  1956     don't have exact fits.  It is normally the chunk split off most
  1957     recently to service another small request.  Its size is cached in
  1958     dvsize. The link fields of this chunk are not maintained since it
  1959     is not kept in a bin.
  1960 
  1961   SmallBins
  1962     An array of bin headers for free chunks.  These bins hold chunks
  1963     with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
  1964     chunks of all the same size, spaced 8 bytes apart.  To simplify
  1965     use in double-linked lists, each bin header acts as a malloc_chunk
  1966     pointing to the real first node, if it exists (else pointing to
  1967     itself).  This avoids special-casing for headers.  But to avoid
  1968     waste, we allocate only the fd/bk pointers of bins, and then use
  1969     repositioning tricks to treat these as the fields of a chunk.
  1970 
  1971   TreeBins
  1972     Treebins are pointers to the roots of trees holding a range of
  1973     sizes. There are 2 equally spaced treebins for each power of two
  1974     from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
  1975     larger.
  1976 
  1977   Bin maps
  1978     There is one bit map for small bins ("smallmap") and one for
  1979     treebins ("treemap).  Each bin sets its bit when non-empty, and
  1980     clears the bit when empty.  Bit operations are then used to avoid
  1981     bin-by-bin searching -- nearly all "search" is done without ever
  1982     looking at bins that won't be selected.  The bit maps
  1983     conservatively use 32 bits per map word, even on a 64-bit system.
  1984     For a good description of some of the bit-based techniques used
  1985     here, see Henry S. Warren Jr's book "Hacker's Delight" (and
  1986     supplement at http://hackersdelight.org/). Many of these are
  1987     intended to reduce the branchiness of paths through malloc etc, as
  1988     well as to reduce the number of memory locations read or written.
  1989 
  1990   Segments
  1991     A list of segments headed by an embedded malloc_segment record
  1992     representing the initial space.
  1993 
  1994   Address check support
  1995     The least_addr field is the least address ever obtained from
  1996     MORECORE or MMAP. Attempted frees and reallocs of any address less
  1997     than this are trapped (unless INSECURE is defined).
  1998 
  1999   Magic tag
  2000     A cross-check field that should always hold the same value as mparams.magic.
  2001 
  2002   Flags
  2003     Bits recording whether to use MMAP, locks, or contiguous MORECORE
  2004 
  2005   Statistics
  2006     Each space keeps track of current and maximum system memory
  2007     obtained via MORECORE or MMAP.
  2008 
  2009   Locking
  2010     If USE_LOCKS is defined, the "mutex" lock is acquired and released
  2011     around every public call using this mspace.
  2012 */
  2013 
  2014 /* Bin types, widths and sizes */
  2015 #define NSMALLBINS        (32U)
  2016 #define NTREEBINS         (32U)
  2017 #define SMALLBIN_SHIFT    (3U)
  2018 #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
  2019 #define TREEBIN_SHIFT     (8U)
  2020 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
  2021 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
  2022 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
  2023 
  2024 struct malloc_state {
  2025   binmap_t   smallmap;
  2026   binmap_t   treemap;
  2027   size_t     dvsize;
  2028   size_t     topsize;
  2029   char*      least_addr;
  2030   mchunkptr  dv;
  2031   mchunkptr  top;
  2032   size_t     trim_check;
  2033   size_t     magic;
  2034   mchunkptr  smallbins[(NSMALLBINS+1)*2];
  2035   tbinptr    treebins[NTREEBINS];
  2036   size_t     footprint;
  2037   size_t     max_footprint;
  2038   flag_t     mflags;
  2039 #if USE_LOCKS
  2040   MLOCK_T    mutex;     /* locate lock among fields that rarely change */
  2041 #endif /* USE_LOCKS */
  2042   msegment   seg;
  2043 };
  2044 
  2045 typedef struct malloc_state*    mstate;
  2046 
  2047 /* ------------- Global malloc_state and malloc_params ------------------- */
  2048 
  2049 /*
  2050   malloc_params holds global properties, including those that can be
  2051   dynamically set using mallopt. There is a single instance, mparams,
  2052   initialized in init_mparams.
  2053 */
  2054 
  2055 struct malloc_params {
  2056   size_t magic;
  2057   size_t page_size;
  2058   size_t granularity;
  2059   size_t mmap_threshold;
  2060   size_t trim_threshold;
  2061   flag_t default_mflags;
  2062 };
  2063 
  2064 static struct malloc_params mparams;
  2065 
  2066 /* The global malloc_state used for all non-"mspace" calls */
  2067 static struct malloc_state _gm_;
  2068 #define gm                 (&_gm_)
  2069 #define is_global(M)       ((M) == &_gm_)
  2070 #define is_initialized(M)  ((M)->top != 0)
  2071 
  2072 /* -------------------------- system alloc setup ------------------------- */
  2073 
  2074 /* Operations on mflags */
  2075 
  2076 #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
  2077 #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
  2078 #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
  2079 
  2080 #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
  2081 #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
  2082 #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
  2083 
  2084 #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
  2085 #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
  2086 
  2087 #define set_lock(M,L)\
  2088  ((M)->mflags = (L)?\
  2089   ((M)->mflags | USE_LOCK_BIT) :\
  2090   ((M)->mflags & ~USE_LOCK_BIT))
  2091 
  2092 /* page-align a size */
  2093 #define page_align(S)\
  2094  (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
  2095 
  2096 /* granularity-align a size */
  2097 #define granularity_align(S)\
  2098   (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
  2099 
  2100 #define is_page_aligned(S)\
  2101    (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
  2102 #define is_granularity_aligned(S)\
  2103    (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
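       /*
         For example, assuming mparams.page_size is 4096, page_align(1) is
         4096 and page_align(4096) is 8192: the macro always adds a full page
         before masking, so an already page-aligned size is still rounded up.
       */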
  2104 
  2105 /*  True if segment S holds address A */
  2106 #define segment_holds(S, A)\
  2107   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
  2108 
  2109 /* Return segment holding given address */
  2110 static msegmentptr segment_holding(mstate m, char* addr) {
  2111   msegmentptr sp = &m->seg;
  2112   for (;;) {
  2113     if (addr >= sp->base && addr < sp->base + sp->size)
  2114       return sp;
  2115     if ((sp = sp->next) == 0)
  2116       return 0;
  2117   }
  2118 }
  2119 
  2120 /* Return true if segment contains a segment link */
  2121 static int has_segment_link(mstate m, msegmentptr ss) {
  2122   msegmentptr sp = &m->seg;
  2123   for (;;) {
  2124     if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
  2125       return 1;
  2126     if ((sp = sp->next) == 0)
  2127       return 0;
  2128   }
  2129 }
  2130 
  2131 #ifndef MORECORE_CANNOT_TRIM
  2132 #define should_trim(M,s)  ((s) > (M)->trim_check)
  2133 #else  /* MORECORE_CANNOT_TRIM */
  2134 #define should_trim(M,s)  (0)
  2135 #endif /* MORECORE_CANNOT_TRIM */
  2136 
  2137 /*
  2138   TOP_FOOT_SIZE is padding at the end of a segment, including space
  2139   that may be needed to place segment records and fenceposts when new
  2140   noncontiguous segments are added.
  2141 */
  2142 #define TOP_FOOT_SIZE\
  2143   (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
  2144 
  2145 
  2146 /* -------------------------------  Hooks -------------------------------- */
  2147 
  2148 /*
  2149   PREACTION should be defined to return 0 on success, and nonzero on
  2150   failure. If you are not using locking, you can redefine these to do
  2151   anything you like.
  2152 */
  2153 
  2154 #if USE_LOCKS
  2155 
  2156 /* Ensure locks are initialized */
  2157 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
  2158 
  2159 #define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
  2160 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
  2161 #else /* USE_LOCKS */
  2162 
  2163 #ifndef PREACTION
  2164 #define PREACTION(M) (0)
  2165 #endif  /* PREACTION */
  2166 
  2167 #ifndef POSTACTION
  2168 #define POSTACTION(M)
  2169 #endif  /* POSTACTION */
  2170 
  2171 #endif /* USE_LOCKS */
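       /*
         Sketch of the typical call pattern in the public entry points (gm is
         the global malloc_state defined above):

           if (!PREACTION(gm)) {
             ... operate on gm ...
             POSTACTION(gm);
           }
       */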
  2172 
  2173 /*
  2174   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  2175   USAGE_ERROR_ACTION is triggered on detected bad frees and
  2176   reallocs. The argument p is an address that might have triggered the
  2177   fault. It is ignored by the two predefined actions, but might be
  2178   useful in custom actions that try to help diagnose errors.
  2179 */
  2180 
  2181 #if PROCEED_ON_ERROR
  2182 
  2183 /* A count of the number of corruption errors causing resets */
  2184 int malloc_corruption_error_count;
  2185 
  2186 /* default corruption action */
  2187 static void reset_on_error(mstate m);
  2188 
  2189 #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
  2190 #define USAGE_ERROR_ACTION(m, p)
  2191 
  2192 #else /* PROCEED_ON_ERROR */
  2193 
  2194 #ifndef CORRUPTION_ERROR_ACTION
  2195 #define CORRUPTION_ERROR_ACTION(m) ABORT
  2196 #endif /* CORRUPTION_ERROR_ACTION */
  2197 
  2198 #ifndef USAGE_ERROR_ACTION
  2199 #define USAGE_ERROR_ACTION(m,p) ABORT
  2200 #endif /* USAGE_ERROR_ACTION */
  2201 
  2202 #endif /* PROCEED_ON_ERROR */
  2203 
  2204 /* -------------------------- Debugging setup ---------------------------- */
  2205 
  2206 #if ! DEBUG
  2207 
  2208 #define check_free_chunk(M,P)
  2209 #define check_inuse_chunk(M,P)
  2210 #define check_malloced_chunk(M,P,N)
  2211 #define check_mmapped_chunk(M,P)
  2212 #define check_malloc_state(M)
  2213 #define check_top_chunk(M,P)
  2214 
  2215 #else /* DEBUG */
  2216 #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
  2217 #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
  2218 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
  2219 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
  2220 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
  2221 #define check_malloc_state(M)       do_check_malloc_state(M)
  2222 
  2223 static void   do_check_any_chunk(mstate m, mchunkptr p);
  2224 static void   do_check_top_chunk(mstate m, mchunkptr p);
  2225 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
  2226 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
  2227 static void   do_check_free_chunk(mstate m, mchunkptr p);
  2228 static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
  2229 static void   do_check_tree(mstate m, tchunkptr t);
  2230 static void   do_check_treebin(mstate m, bindex_t i);
  2231 static void   do_check_smallbin(mstate m, bindex_t i);
  2232 static void   do_check_malloc_state(mstate m);
  2233 static int    bin_find(mstate m, mchunkptr x);
  2234 static size_t traverse_and_check(mstate m);
  2235 #endif /* DEBUG */
  2236 
  2237 /* ---------------------------- Indexing Bins ---------------------------- */
  2238 
  2239 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
  2240 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
  2241 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
  2242 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
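       /*
         For example, with SMALLBIN_SHIFT of 3 a 40-byte chunk is "small":
         small_index(40) == 5 and small_index2size(5) == 40.  Sizes of
         MIN_LARGE_SIZE (256) and above go to the treebins instead.
       */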
  2243 
  2244 /* addressing by index. See above about smallbin repositioning */
  2245 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
  2246 #define treebin_at(M,i)     (&((M)->treebins[i]))
  2247 
  2248 /* assign tree index for size S to variable I */
  2249 #if defined(__GNUC__) && defined(i386)
  2250 #define compute_tree_index(S, I)\
  2251 {\
  2252   size_t X = S >> TREEBIN_SHIFT;\
  2253   if (X == 0)\
  2254     I = 0;\
  2255   else if (X > 0xFFFF)\
  2256     I = NTREEBINS-1;\
  2257   else {\
  2258     unsigned int K;\
  2259     __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
  2260     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  2261   }\
  2262 }
  2263 #else /* GNUC */
  2264 #define compute_tree_index(S, I)\
  2265 {\
  2266   size_t X = S >> TREEBIN_SHIFT;\
  2267   if (X == 0)\
  2268     I = 0;\
  2269   else if (X > 0xFFFF)\
  2270     I = NTREEBINS-1;\
  2271   else {\
  2272     unsigned int Y = (unsigned int)X;\
  2273     unsigned int N = ((Y - 0x100) >> 16) & 8;\
  2274     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
  2275     N += K;\
  2276     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
  2277     K = 14 - N + ((Y <<= K) >> 15);\
  2278     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  2279   }\
  2280 }
  2281 #endif /* GNUC */
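       /*
         Worked example: for a 512-byte chunk, S >> TREEBIN_SHIFT is 2, whose
         highest set bit is bit 1, so I == (1 << 1) + ((512 >> 8) & 1) == 2;
         a 768-byte chunk lands in the next bin, I == 3.
       */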
  2282 
  2283 /* Bit representing maximum resolved size in a treebin at i */
  2284 #define bit_for_tree_index(i) \
  2285    (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
  2286 
  2287 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
  2288 #define leftshift_for_tree_index(i) \
  2289    ((i == NTREEBINS-1)? 0 : \
  2290     ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
  2291 
  2292 /* The size of the smallest chunk held in bin with index i */
  2293 #define minsize_for_tree_index(i) \
  2294    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
  2295    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
  2296 
  2297 
  2298 /* ------------------------ Operations on bin maps ----------------------- */
  2299 
  2300 /* bit corresponding to given index */
  2301 #define idx2bit(i)              ((binmap_t)(1) << (i))
  2302 
  2303 /* Mark/Clear bits with given index */
  2304 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
  2305 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
  2306 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
  2307 
  2308 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
  2309 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
  2310 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
  2311 
  2312 /* index corresponding to given bit */
  2313 
  2314 #if defined(__GNUC__) && defined(i386)
  2315 #define compute_bit2idx(X, I)\
  2316 {\
  2317   unsigned int J;\
  2318   __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  2319   I = (bindex_t)J;\
  2320 }
  2321 
  2322 #else /* GNUC */
  2323 #if  USE_BUILTIN_FFS
  2324 #define compute_bit2idx(X, I) I = ffs(X)-1
  2325 
  2326 #else /* USE_BUILTIN_FFS */
  2327 #define compute_bit2idx(X, I)\
  2328 {\
  2329   unsigned int Y = X - 1;\
  2330   unsigned int K = Y >> (16-4) & 16;\
  2331   unsigned int N = K;        Y >>= K;\
  2332   N += K = Y >> (8-3) &  8;  Y >>= K;\
  2333   N += K = Y >> (4-2) &  4;  Y >>= K;\
  2334   N += K = Y >> (2-1) &  2;  Y >>= K;\
  2335   N += K = Y >> (1-0) &  1;  Y >>= K;\
  2336   I = (bindex_t)(N + Y);\
  2337 }
  2338 #endif /* USE_BUILTIN_FFS */
  2339 #endif /* GNUC */
  2340 
  2341 /* isolate the least set bit of a bitmap */
  2342 #define least_bit(x)         ((x) & -(x))
  2343 
  2344 /* mask with all bits to left of least bit of x on */
  2345 #define left_bits(x)         ((x<<1) | -(x<<1))
  2346 
  2347 /* mask with all bits to left of or equal to least bit of x on */
  2348 #define same_or_left_bits(x) ((x) | -(x))
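       /*
         Illustration with a 32-bit binmap_t x == 0x28 (bits 3 and 5 set):

           least_bit(x)            == 0x00000008
           left_bits(0x08)         == 0xfffffff0   (bits strictly above bit 3)
           same_or_left_bits(0x08) == 0xfffffff8   (bit 3 and above)
       */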
  2349 
  2350 
  2351 /* ----------------------- Runtime Check Support ------------------------- */
  2352 
  2353 /*
  2354   For security, the main invariant is that malloc/free/etc never
  2355   writes to a static address other than malloc_state, unless static
  2356   malloc_state itself has been corrupted, which cannot occur via
  2357   malloc (because of these checks). In essence this means that we
  2358   believe all pointers, sizes, maps etc held in malloc_state, but
  2359   check all of those linked or offsetted from other embedded data
  2360   structures.  These checks are interspersed with main code in a way
  2361   that tends to minimize their run-time cost.
  2362 
  2363   When FOOTERS is defined, in addition to range checking, we also
  2364   verify footer fields of inuse chunks, which can be used to guarantee
  2365   that the mstate controlling malloc/free is intact.  This is a
  2366   streamlined version of the approach described by William Robertson
  2367   et al in "Run-time Detection of Heap-based Overflows" LISA'03
  2368   http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  2369   of an inuse chunk holds the xor of its mstate and a random seed,
  2370   which is checked upon calls to free() and realloc().  This is
  2371   (probabilistically) unguessable from outside the program, but can be
  2372   computed by any code successfully malloc'ing any chunk, so does not
  2373   itself provide protection against code that has already broken
  2374   security through some other means.  Unlike Robertson et al, we
  2375   always dynamically check addresses of all offset chunks (previous,
  2376   next, etc). This turns out to be cheaper than relying on hashes.
  2377 */
  2378 
  2379 #if !INSECURE
  2380 /* Check if address a is at least as high as any from MORECORE or MMAP */
  2381 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
  2382 /* Check if address of next chunk n is higher than base chunk p */
  2383 #define ok_next(p, n)    ((char*)(p) < (char*)(n))
  2384 /* Check if p has its cinuse bit on */
  2385 #define ok_cinuse(p)     cinuse(p)
  2386 /* Check if p has its pinuse bit on */
  2387 #define ok_pinuse(p)     pinuse(p)
  2388 
  2389 #else /* !INSECURE */
  2390 #define ok_address(M, a) (1)
  2391 #define ok_next(b, n)    (1)
  2392 #define ok_cinuse(p)     (1)
  2393 #define ok_pinuse(p)     (1)
  2394 #endif /* !INSECURE */
  2395 
  2396 #if (FOOTERS && !INSECURE)
  2397 /* Check if (alleged) mstate m has expected magic field */
  2398 #define ok_magic(M)      ((M)->magic == mparams.magic)
  2399 #else  /* (FOOTERS && !INSECURE) */
  2400 #define ok_magic(M)      (1)
  2401 #endif /* (FOOTERS && !INSECURE) */
  2402 
  2403 
  2404 /* In gcc, use __builtin_expect to minimize impact of checks */
  2405 #if !INSECURE
  2406 #if defined(__GNUC__) && __GNUC__ >= 3
  2407 #define RTCHECK(e)  __builtin_expect(e, 1)
  2408 #else /* GNUC */
  2409 #define RTCHECK(e)  (e)
  2410 #endif /* GNUC */
  2411 #else /* !INSECURE */
  2412 #define RTCHECK(e)  (1)
  2413 #endif /* !INSECURE */
  2414 
  2415 /* macros to set up inuse chunks with or without footers */
  2416 
  2417 #if !FOOTERS
  2418 
  2419 #define mark_inuse_foot(M,p,s)
  2420 
  2421 /* Set cinuse bit and pinuse bit of next chunk */
  2422 #define set_inuse(M,p,s)\
  2423   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2424   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2425 
  2426 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
  2427 #define set_inuse_and_pinuse(M,p,s)\
  2428   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2429   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  2430 
  2431 /* Set size, cinuse and pinuse bit of this chunk */
  2432 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2433   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
  2434 
  2435 #else /* FOOTERS */
  2436 
  2437 /* Set foot of inuse chunk to be xor of mstate and seed */
  2438 #define mark_inuse_foot(M,p,s)\
  2439   (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
  2440 
  2441 #define get_mstate_for(p)\
  2442   ((mstate)(((mchunkptr)((char*)(p) +\
  2443     (chunksize(p))))->prev_foot ^ mparams.magic))
  2444 
  2445 #define set_inuse(M,p,s)\
  2446   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  2447   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  2448   mark_inuse_foot(M,p,s))
  2449 
  2450 #define set_inuse_and_pinuse(M,p,s)\
  2451   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2452   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  2453  mark_inuse_foot(M,p,s))
  2454 
  2455 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  2456   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  2457   mark_inuse_foot(M, p, s))
  2458 
  2459 #endif /* !FOOTERS */
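
       /*
         Sketch of the FOOTERS round trip (illustrative only): after
         mark_inuse_foot(M, p, s), the word just past the chunk holds
         ((size_t)M ^ mparams.magic), so get_mstate_for(p) xors that
         footer with mparams.magic to recover M; free() and realloc()
         then validate the result with ok_magic() before trusting it.
       */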
  2460 
  2461 /* ---------------------------- setting mparams -------------------------- */
  2462 
  2463 /* Initialize mparams */
  2464 static int init_mparams(void) {
  2465   if (mparams.page_size == 0) {
  2466     size_t s;
  2467 
  2468     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  2469     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  2470 #if MORECORE_CONTIGUOUS
  2471     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
  2472 #else  /* MORECORE_CONTIGUOUS */
  2473     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
  2474 #endif /* MORECORE_CONTIGUOUS */
  2475 
  2476 #if (FOOTERS && !INSECURE)
  2477     {
  2478 #if USE_DEV_RANDOM
  2479       int fd;
  2480       unsigned char buf[sizeof(size_t)];
  2481       /* Try to use /dev/urandom, else fall back on using time */
  2482       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
  2483           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
  2484         s = *((size_t *) buf);
  2485         close(fd);
  2486       }
  2487       else
  2488 #endif /* USE_DEV_RANDOM */
  2489         s = (size_t)(time(0) ^ (size_t)0x55555555U);
  2490 
  2491       s |= (size_t)8U;    /* ensure nonzero */
  2492       s &= ~(size_t)7U;   /* improve chances of fault for bad values */
  2493 
  2494     }
  2495 #else /* (FOOTERS && !INSECURE) */
  2496     s = (size_t)0x58585858U;
  2497 #endif /* (FOOTERS && !INSECURE) */
  2498     ACQUIRE_MAGIC_INIT_LOCK();
  2499     if (mparams.magic == 0) {
  2500       mparams.magic = s;
  2501       /* Set up lock for main malloc area */
  2502       INITIAL_LOCK(&gm->mutex);
  2503       gm->mflags = mparams.default_mflags;
  2504     }
  2505     RELEASE_MAGIC_INIT_LOCK();
  2506 
  2507 #ifndef WIN32
  2508     mparams.page_size = malloc_getpagesize;
  2509     mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
  2510                            DEFAULT_GRANULARITY : mparams.page_size);
  2511 #else /* WIN32 */
  2512     {
  2513       SYSTEM_INFO system_info;
  2514       GetSystemInfo(&system_info);
  2515       mparams.page_size = system_info.dwPageSize;
  2516       mparams.granularity = system_info.dwAllocationGranularity;
  2517     }
  2518 #endif /* WIN32 */
  2519 
  2520     /* Sanity-check configuration:
  2521        size_t must be unsigned and as wide as pointer type.
  2522        ints must be at least 4 bytes.
  2523        alignment must be at least 8.
  2524        Alignment, min chunk size, and page size must all be powers of 2.
  2525     */
  2526     if ((sizeof(size_t) != sizeof(char*)) ||
  2527         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
  2528         (sizeof(int) < 4)  ||
  2529         (MALLOC_ALIGNMENT < (size_t)8U) ||
  2530         ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
  2531         ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
  2532         ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
  2533         ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
  2534       ABORT;
  2535   }
  2536   return 0;
  2537 }
  2538 
  2539 /* support for mallopt */
  2540 static int change_mparam(int param_number, int value) {
  2541   size_t val = (size_t)value;
  2542   init_mparams();
  2543   switch(param_number) {
  2544   case M_TRIM_THRESHOLD:
  2545     mparams.trim_threshold = val;
  2546     return 1;
  2547   case M_GRANULARITY:
  2548     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
  2549       mparams.granularity = val;
  2550       return 1;
  2551     }
  2552     else
  2553       return 0;
  2554   case M_MMAP_THRESHOLD:
  2555     mparams.mmap_threshold = val;
  2556     return 1;
  2557   default:
  2558     return 0;
  2559   }
  2560 }
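
       /*
         A hedged usage sketch (the exported entry-point name depends on the
         prefix configuration; the parameter constants are defined earlier in
         this file):

           mallopt(M_MMAP_THRESHOLD, 512*1024);   returns 1
           mallopt(M_GRANULARITY,    64*1024);    returns 1 if >= page size
                                                  and a power of two
           mallopt(M_TRIM_THRESHOLD, 1024*1024);  returns 1

         change_mparam returns 0 for unknown parameters or out-of-range
         granularity values.
       */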
  2561 
  2562 #if DEBUG
  2563 /* ------------------------- Debugging Support --------------------------- */
  2564 
  2565 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
  2566 static void do_check_any_chunk(mstate m, mchunkptr p) {
  2567   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2568   assert(ok_address(m, p));
  2569 }
  2570 
  2571 /* Check properties of top chunk */
  2572 static void do_check_top_chunk(mstate m, mchunkptr p) {
  2573   msegmentptr sp = segment_holding(m, (char*)p);
  2574   size_t  sz = chunksize(p);
  2575   assert(sp != 0);
  2576   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2577   assert(ok_address(m, p));
  2578   assert(sz == m->topsize);
  2579   assert(sz > 0);
  2580   assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  2581   assert(pinuse(p));
  2582   assert(!next_pinuse(p));
  2583 }
  2584 
  2585 /* Check properties of (inuse) mmapped chunks */
  2586 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  2587   size_t  sz = chunksize(p);
  2588   size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  2589   assert(is_mmapped(p));
  2590   assert(use_mmap(m));
  2591   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2592   assert(ok_address(m, p));
  2593   assert(!is_small(sz));
  2594   assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  2595   assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  2596   assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
  2597 }
  2598 
  2599 /* Check properties of inuse chunks */
  2600 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  2601   do_check_any_chunk(m, p);
  2602   assert(cinuse(p));
  2603   assert(next_pinuse(p));
  2604   /* If not pinuse and not mmapped, previous chunk has OK offset */
  2605   assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  2606   if (is_mmapped(p))
  2607     do_check_mmapped_chunk(m, p);
  2608 }
  2609 
  2610 /* Check properties of free chunks */
  2611 static void do_check_free_chunk(mstate m, mchunkptr p) {
  2612   size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2613   mchunkptr next = chunk_plus_offset(p, sz);
  2614   do_check_any_chunk(m, p);
  2615   assert(!cinuse(p));
  2616   assert(!next_pinuse(p));
  2617   assert (!is_mmapped(p));
  2618   if (p != m->dv && p != m->top) {
  2619     if (sz >= MIN_CHUNK_SIZE) {
  2620       assert((sz & CHUNK_ALIGN_MASK) == 0);
  2621       assert(is_aligned(chunk2mem(p)));
  2622       assert(next->prev_foot == sz);
  2623       assert(pinuse(p));
  2624       assert (next == m->top || cinuse(next));
  2625       assert(p->fd->bk == p);
  2626       assert(p->bk->fd == p);
  2627     }
  2628     else  /* markers are always of size SIZE_T_SIZE */
  2629       assert(sz == SIZE_T_SIZE);
  2630   }
  2631 }
  2632 
  2633 /* Check properties of malloced chunks at the point they are malloced */
  2634 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  2635   if (mem != 0) {
  2636     mchunkptr p = mem2chunk(mem);
  2637     size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2638     do_check_inuse_chunk(m, p);
  2639     assert((sz & CHUNK_ALIGN_MASK) == 0);
  2640     assert(sz >= MIN_CHUNK_SIZE);
  2641     assert(sz >= s);
  2642     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
  2643     assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  2644   }
  2645 }
  2646 
  2647 /* Check a tree and its subtrees.  */
  2648 static void do_check_tree(mstate m, tchunkptr t) {
  2649   tchunkptr head = 0;
  2650   tchunkptr u = t;
  2651   bindex_t tindex = t->index;
  2652   size_t tsize = chunksize(t);
  2653   bindex_t idx;
  2654   compute_tree_index(tsize, idx);
  2655   assert(tindex == idx);
  2656   assert(tsize >= MIN_LARGE_SIZE);
  2657   assert(tsize >= minsize_for_tree_index(idx));
  2658   assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
  2659 
  2660   do { /* traverse through chain of same-sized nodes */
  2661     do_check_any_chunk(m, ((mchunkptr)u));
  2662     assert(u->index == tindex);
  2663     assert(chunksize(u) == tsize);
  2664     assert(!cinuse(u));
  2665     assert(!next_pinuse(u));
  2666     assert(u->fd->bk == u);
  2667     assert(u->bk->fd == u);
  2668     if (u->parent == 0) {
  2669       assert(u->child[0] == 0);
  2670       assert(u->child[1] == 0);
  2671     }
  2672     else {
  2673       assert(head == 0); /* only one node on chain has parent */
  2674       head = u;
  2675       assert(u->parent != u);
  2676       assert (u->parent->child[0] == u ||
  2677               u->parent->child[1] == u ||
  2678               *((tbinptr*)(u->parent)) == u);
  2679       if (u->child[0] != 0) {
  2680         assert(u->child[0]->parent == u);
  2681         assert(u->child[0] != u);
  2682         do_check_tree(m, u->child[0]);
  2683       }
  2684       if (u->child[1] != 0) {
  2685         assert(u->child[1]->parent == u);
  2686         assert(u->child[1] != u);
  2687         do_check_tree(m, u->child[1]);
  2688       }
  2689       if (u->child[0] != 0 && u->child[1] != 0) {
  2690         assert(chunksize(u->child[0]) < chunksize(u->child[1]));
  2691       }
  2692     }
  2693     u = u->fd;
  2694   } while (u != t);
  2695   assert(head != 0);
  2696 }
  2697 
  2698 /*  Check all the chunks in a treebin.  */
  2699 static void do_check_treebin(mstate m, bindex_t i) {
  2700   tbinptr* tb = treebin_at(m, i);
  2701   tchunkptr t = *tb;
  2702   int empty = (m->treemap & (1U << i)) == 0;
  2703   if (t == 0)
  2704     assert(empty);
  2705   if (!empty)
  2706     do_check_tree(m, t);
  2707 }
  2708 
  2709 /*  Check all the chunks in a smallbin.  */
  2710 static void do_check_smallbin(mstate m, bindex_t i) {
  2711   sbinptr b = smallbin_at(m, i);
  2712   mchunkptr p = b->bk;
  2713   unsigned int empty = (m->smallmap & (1U << i)) == 0;
  2714   if (p == b)
  2715     assert(empty);
  2716   if (!empty) {
  2717     for (; p != b; p = p->bk) {
  2718       size_t size = chunksize(p);
  2719       mchunkptr q;
  2720       /* each chunk claims to be free */
  2721       do_check_free_chunk(m, p);
  2722       /* chunk belongs in bin */
  2723       assert(small_index(size) == i);
  2724       assert(p->bk == b || chunksize(p->bk) == chunksize(p));
  2725       /* chunk is followed by an inuse chunk */
  2726       q = next_chunk(p);
  2727       if (q->head != FENCEPOST_HEAD)
  2728         do_check_inuse_chunk(m, q);
  2729     }
  2730   }
  2731 }
  2732 
  2733 /* Find x in a bin. Used in other check functions. */
  2734 static int bin_find(mstate m, mchunkptr x) {
  2735   size_t size = chunksize(x);
  2736   if (is_small(size)) {
  2737     bindex_t sidx = small_index(size);
  2738     sbinptr b = smallbin_at(m, sidx);
  2739     if (smallmap_is_marked(m, sidx)) {
  2740       mchunkptr p = b;
  2741       do {
  2742         if (p == x)
  2743           return 1;
  2744       } while ((p = p->fd) != b);
  2745     }
  2746   }
  2747   else {
  2748     bindex_t tidx;
  2749     compute_tree_index(size, tidx);
  2750     if (treemap_is_marked(m, tidx)) {
  2751       tchunkptr t = *treebin_at(m, tidx);
  2752       size_t sizebits = size << leftshift_for_tree_index(tidx);
  2753       while (t != 0 && chunksize(t) != size) {
  2754         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  2755         sizebits <<= 1;
  2756       }
  2757       if (t != 0) {
  2758         tchunkptr u = t;
  2759         do {
  2760           if (u == (tchunkptr)x)
  2761             return 1;
  2762         } while ((u = u->fd) != t);
  2763       }
  2764     }
  2765   }
  2766   return 0;
  2767 }
  2768 
  2769 /* Traverse each chunk and check it; return total */
  2770 static size_t traverse_and_check(mstate m) {
  2771   size_t sum = 0;
  2772   if (is_initialized(m)) {
  2773     msegmentptr s = &m->seg;
  2774     sum += m->topsize + TOP_FOOT_SIZE;
  2775     while (s != 0) {
  2776       mchunkptr q = align_as_chunk(s->base);
  2777       mchunkptr lastq = 0;
  2778       assert(pinuse(q));
  2779       while (segment_holds(s, q) &&
  2780              q != m->top && q->head != FENCEPOST_HEAD) {
  2781         sum += chunksize(q);
  2782         if (cinuse(q)) {
  2783           assert(!bin_find(m, q));
  2784           do_check_inuse_chunk(m, q);
  2785         }
  2786         else {
  2787           assert(q == m->dv || bin_find(m, q));
  2788           assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
  2789           do_check_free_chunk(m, q);
  2790         }
  2791         lastq = q;
  2792         q = next_chunk(q);
  2793       }
  2794       s = s->next;
  2795     }
  2796   }
  2797   return sum;
  2798 }
  2799 
  2800 /* Check all properties of malloc_state. */
  2801 static void do_check_malloc_state(mstate m) {
  2802   bindex_t i;
  2803   size_t total;
  2804   /* check bins */
  2805   for (i = 0; i < NSMALLBINS; ++i)
  2806     do_check_smallbin(m, i);
  2807   for (i = 0; i < NTREEBINS; ++i)
  2808     do_check_treebin(m, i);
  2809 
  2810   if (m->dvsize != 0) { /* check dv chunk */
  2811     do_check_any_chunk(m, m->dv);
  2812     assert(m->dvsize == chunksize(m->dv));
  2813     assert(m->dvsize >= MIN_CHUNK_SIZE);
  2814     assert(bin_find(m, m->dv) == 0);
  2815   }
  2816 
  2817   if (m->top != 0) {   /* check top chunk */
  2818     do_check_top_chunk(m, m->top);
  2819     assert(m->topsize == chunksize(m->top));
  2820     assert(m->topsize > 0);
  2821     assert(bin_find(m, m->top) == 0);
  2822   }
  2823 
  2824   total = traverse_and_check(m);
  2825   assert(total <= m->footprint);
  2826   assert(m->footprint <= m->max_footprint);
  2827 }
  2828 #endif /* DEBUG */
  2829 
  2830 /* ----------------------------- statistics ------------------------------ */
  2831 
  2832 #if !NO_MALLINFO
  2833 static struct mallinfo internal_mallinfo(mstate m) {
  2834   struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  2835   if (!PREACTION(m)) {
  2836     check_malloc_state(m);
  2837     if (is_initialized(m)) {
  2838       size_t nfree = SIZE_T_ONE; /* top always free */
  2839       size_t mfree = m->topsize + TOP_FOOT_SIZE;
  2840       size_t sum = mfree;
  2841       msegmentptr s = &m->seg;
  2842       while (s != 0) {
  2843         mchunkptr q = align_as_chunk(s->base);
  2844         while (segment_holds(s, q) &&
  2845                q != m->top && q->head != FENCEPOST_HEAD) {
  2846           size_t sz = chunksize(q);
  2847           sum += sz;
  2848           if (!cinuse(q)) {
  2849             mfree += sz;
  2850             ++nfree;
  2851           }
  2852           q = next_chunk(q);
  2853         }
  2854         s = s->next;
  2855       }
  2856 
  2857       nm.arena    = sum;
  2858       nm.ordblks  = nfree;
  2859       nm.hblkhd   = m->footprint - sum;
  2860       nm.usmblks  = m->max_footprint;
  2861       nm.uordblks = m->footprint - mfree;
  2862       nm.fordblks = mfree;
  2863       nm.keepcost = m->topsize;
  2864     }
  2865 
  2866     POSTACTION(m);
  2867   }
  2868   return nm;
  2869 }
  2870 #endif /* !NO_MALLINFO */
  2871 
  2872 static void internal_malloc_stats(mstate m) {
  2873   if (!PREACTION(m)) {
  2874     size_t maxfp = 0;
  2875     size_t fp = 0;
  2876     size_t used = 0;
  2877     check_malloc_state(m);
  2878     if (is_initialized(m)) {
  2879       msegmentptr s = &m->seg;
  2880       maxfp = m->max_footprint;
  2881       fp = m->footprint;
  2882       used = fp - (m->topsize + TOP_FOOT_SIZE);
  2883 
  2884       while (s != 0) {
  2885         mchunkptr q = align_as_chunk(s->base);
  2886         while (segment_holds(s, q) &&
  2887                q != m->top && q->head != FENCEPOST_HEAD) {
  2888           if (!cinuse(q))
  2889             used -= chunksize(q);
  2890           q = next_chunk(q);
  2891         }
  2892         s = s->next;
  2893       }
  2894     }
  2895 
  2896 #ifndef LACKS_STDIO_H
  2897     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
  2898     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
  2899     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
  2900 #endif
  2901 
  2902     POSTACTION(m);
  2903   }
  2904 }
  2905 
  2906 /* ----------------------- Operations on smallbins ----------------------- */
  2907 
  2908 /*
  2909   Various forms of linking and unlinking are defined as macros.  Even
  2910   the ones for trees, which are very long but have very short typical
  2911   paths.  This is ugly but reduces reliance on inlining support of
  2912   compilers.
  2913 */
  2914 
  2915 /* Link a free chunk into a smallbin  */
  2916 #define insert_small_chunk(M, P, S) {\
  2917   bindex_t I  = small_index(S);\
  2918   mchunkptr B = smallbin_at(M, I);\
  2919   mchunkptr F = B;\
  2920   assert(S >= MIN_CHUNK_SIZE);\
  2921   if (!smallmap_is_marked(M, I))\
  2922     mark_smallmap(M, I);\
  2923   else if (RTCHECK(ok_address(M, B->fd)))\
  2924     F = B->fd;\
  2925   else {\
  2926     CORRUPTION_ERROR_ACTION(M);\
  2927   }\
  2928   B->fd = P;\
  2929   F->bk = P;\
  2930   P->fd = F;\
  2931   P->bk = B;\
  2932 }
  2933 
  2934 /* Unlink a chunk from a smallbin  */
  2935 #define unlink_small_chunk(M, P, S) {\
  2936   mchunkptr F = P->fd;\
  2937   mchunkptr B = P->bk;\
  2938   bindex_t I = small_index(S);\
  2939   assert(P != B);\
  2940   assert(P != F);\
  2941   assert(chunksize(P) == small_index2size(I));\
  2942   if (F == B)\
  2943     clear_smallmap(M, I);\
  2944   else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
  2945                    (B == smallbin_at(M,I) || ok_address(M, B)))) {\
  2946     F->bk = B;\
  2947     B->fd = F;\
  2948   }\
  2949   else {\
  2950     CORRUPTION_ERROR_ACTION(M);\
  2951   }\
  2952 }
  2953 
  2954 /* Unlink the first chunk from a smallbin */
  2955 #define unlink_first_small_chunk(M, B, P, I) {\
  2956   mchunkptr F = P->fd;\
  2957   assert(P != B);\
  2958   assert(P != F);\
  2959   assert(chunksize(P) == small_index2size(I));\
  2960   if (B == F)\
  2961     clear_smallmap(M, I);\
  2962   else if (RTCHECK(ok_address(M, F))) {\
  2963     B->fd = F;\
  2964     F->bk = B;\
  2965   }\
  2966   else {\
  2967     CORRUPTION_ERROR_ACTION(M);\
  2968   }\
  2969 }
  2970 
  2971 /* Replace dv node, binning the old one */
  2972 /* Used only when dvsize known to be small */
  2973 #define replace_dv(M, P, S) {\
  2974   size_t DVS = M->dvsize;\
  2975   if (DVS != 0) {\
  2976     mchunkptr DV = M->dv;\
  2977     assert(is_small(DVS));\
  2978     insert_small_chunk(M, DV, DVS);\
  2979   }\
  2980   M->dvsize = S;\
  2981   M->dv = P;\
  2982 }
  2983 
  2984 /* ------------------------- Operations on trees ------------------------- */
  2985 
  2986 /* Insert chunk into tree */
  2987 #define insert_large_chunk(M, X, S) {\
  2988   tbinptr* H;\
  2989   bindex_t I;\
  2990   compute_tree_index(S, I);\
  2991   H = treebin_at(M, I);\
  2992   X->index = I;\
  2993   X->child[0] = X->child[1] = 0;\
  2994   if (!treemap_is_marked(M, I)) {\
  2995     mark_treemap(M, I);\
  2996     *H = X;\
  2997     X->parent = (tchunkptr)H;\
  2998     X->fd = X->bk = X;\
  2999   }\
  3000   else {\
  3001     tchunkptr T = *H;\
  3002     size_t K = S << leftshift_for_tree_index(I);\
  3003     for (;;) {\
  3004       if (chunksize(T) != S) {\
  3005         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
  3006         K <<= 1;\
  3007         if (*C != 0)\
  3008           T = *C;\
  3009         else if (RTCHECK(ok_address(M, C))) {\
  3010           *C = X;\
  3011           X->parent = T;\
  3012           X->fd = X->bk = X;\
  3013           break;\
  3014         }\
  3015         else {\
  3016           CORRUPTION_ERROR_ACTION(M);\
  3017           break;\
  3018         }\
  3019       }\
  3020       else {\
  3021         tchunkptr F = T->fd;\
  3022         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
  3023           T->fd = F->bk = X;\
  3024           X->fd = F;\
  3025           X->bk = T;\
  3026           X->parent = 0;\
  3027           break;\
  3028         }\
  3029         else {\
  3030           CORRUPTION_ERROR_ACTION(M);\
  3031           break;\
  3032         }\
  3033       }\
  3034     }\
  3035   }\
  3036 }
  3037 
  3038 /*
  3039   Unlink steps:
  3040 
  3041   1. If x is a chained node, unlink it from its same-sized fd/bk links
  3042      and choose its bk node as its replacement.
  3043   2. If x was the last node of its size, but not a leaf node, it must
  3044      be replaced with a leaf node (not merely one with an open left or
  3045      right), to make sure that lefts and rights of descendants
  3046      correspond properly to bit masks.  We use the rightmost descendant
  3047      of x.  We could use any other leaf, but this is easy to locate and
  3048      tends to counteract removal of leftmosts elsewhere, and so keeps
  3049      paths shorter than minimally guaranteed.  This doesn't loop much
  3050      because on average a node in a tree is near the bottom.
  3051   3. If x is the base of a chain (i.e., has parent links) relink
  3052      x's parent and children to x's replacement (or null if none).
  3053 */
  3054 
  3055 #define unlink_large_chunk(M, X) {\
  3056   tchunkptr XP = X->parent;\
  3057   tchunkptr R;\
  3058   if (X->bk != X) {\
  3059     tchunkptr F = X->fd;\
  3060     R = X->bk;\
  3061     if (RTCHECK(ok_address(M, F))) {\
  3062       F->bk = R;\
  3063       R->fd = F;\
  3064     }\
  3065     else {\
  3066       CORRUPTION_ERROR_ACTION(M);\
  3067     }\
  3068   }\
  3069   else {\
  3070     tchunkptr* RP;\
  3071     if (((R = *(RP = &(X->child[1]))) != 0) ||\
  3072         ((R = *(RP = &(X->child[0]))) != 0)) {\
  3073       tchunkptr* CP;\
  3074       while ((*(CP = &(R->child[1])) != 0) ||\
  3075              (*(CP = &(R->child[0])) != 0)) {\
  3076         R = *(RP = CP);\
  3077       }\
  3078       if (RTCHECK(ok_address(M, RP)))\
  3079         *RP = 0;\
  3080       else {\
  3081         CORRUPTION_ERROR_ACTION(M);\
  3082       }\
  3083     }\
  3084   }\
  3085   if (XP != 0) {\
  3086     tbinptr* H = treebin_at(M, X->index);\
  3087     if (X == *H) {\
  3088       if ((*H = R) == 0) \
  3089         clear_treemap(M, X->index);\
  3090     }\
  3091     else if (RTCHECK(ok_address(M, XP))) {\
  3092       if (XP->child[0] == X) \
  3093         XP->child[0] = R;\
  3094       else \
  3095         XP->child[1] = R;\
  3096     }\
  3097     else\
  3098       CORRUPTION_ERROR_ACTION(M);\
  3099     if (R != 0) {\
  3100       if (RTCHECK(ok_address(M, R))) {\
  3101         tchunkptr C0, C1;\
  3102         R->parent = XP;\
  3103         if ((C0 = X->child[0]) != 0) {\
  3104           if (RTCHECK(ok_address(M, C0))) {\
  3105             R->child[0] = C0;\
  3106             C0->parent = R;\
  3107           }\
  3108           else\
  3109             CORRUPTION_ERROR_ACTION(M);\
  3110         }\
  3111         if ((C1 = X->child[1]) != 0) {\
  3112           if (RTCHECK(ok_address(M, C1))) {\
  3113             R->child[1] = C1;\
  3114             C1->parent = R;\
  3115           }\
  3116           else\
  3117             CORRUPTION_ERROR_ACTION(M);\
  3118         }\
  3119       }\
  3120       else\
  3121         CORRUPTION_ERROR_ACTION(M);\
  3122     }\
  3123   }\
  3124 }
  3125 
  3126 /* Relays to large vs small bin operations */
  3127 
  3128 #define insert_chunk(M, P, S)\
  3129   if (is_small(S)) insert_small_chunk(M, P, S)\
  3130   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
  3131 
  3132 #define unlink_chunk(M, P, S)\
  3133   if (is_small(S)) unlink_small_chunk(M, P, S)\
  3134   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
  3135 
  3136 
  3137 /* Relays to internal calls to malloc/free from realloc, memalign etc */
  3138 
  3139 #if ONLY_MSPACES
  3140 #define internal_malloc(m, b) mspace_malloc(m, b)
  3141 #define internal_free(m, mem) mspace_free(m,mem);
  3142 #else /* ONLY_MSPACES */
  3143 #if MSPACES
  3144 #define internal_malloc(m, b)\
  3145    (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
  3146 #define internal_free(m, mem)\
  3147    if (m == gm) dlfree(mem); else mspace_free(m,mem);
  3148 #else /* MSPACES */
  3149 #define internal_malloc(m, b) dlmalloc(b)
  3150 #define internal_free(m, mem) dlfree(mem)
  3151 #endif /* MSPACES */
  3152 #endif /* ONLY_MSPACES */
  3153 
  3154 /* -----------------------  Direct-mmapping chunks ----------------------- */
  3155 
  3156 /*
  3157   Directly mmapped chunks are set up with an offset to the start of
  3158   the mmapped region stored in the prev_foot field of the chunk. This
  3159   allows reconstruction of the required argument to MUNMAP when freed,
  3160   and also allows adjustment of the returned chunk to meet alignment
  3161   requirements (especially in memalign).  There is also enough space
  3162   allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
  3163   the PINUSE bit so frees can be checked.
  3164 */
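
       /*
         Layout sketch (illustrative only), matching mmap_alloc below:

           mm ......................... page-aligned base from DIRECT_MMAP
           p = mm + offset ............ prev_foot = offset|IS_MMAPPED_BIT,
                                        head = psize|CINUSE_BIT
           p + psize .................. fake chunk, head = FENCEPOST_HEAD
           p + psize + SIZE_T_SIZE .... head = 0

         On free, the original mapping is recovered as base (char*)p - offset
         with length psize + offset + MMAP_FOOT_PAD.
       */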
  3165 
  3166 /* Malloc using mmap */
  3167 static void* mmap_alloc(mstate m, size_t nb) {
  3168   size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3169   if (mmsize > nb) {     /* Check for wrap around 0 */
  3170     char* mm = (char*)(DIRECT_MMAP(mmsize));
  3171     if (mm != CMFAIL) {
  3172       size_t offset = align_offset(chunk2mem(mm));
  3173       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  3174       mchunkptr p = (mchunkptr)(mm + offset);
  3175       p->prev_foot = offset | IS_MMAPPED_BIT;
  3176       (p)->head = (psize|CINUSE_BIT);
  3177       mark_inuse_foot(m, p, psize);
  3178       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  3179       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
  3180 
  3181       if (mm < m->least_addr)
  3182         m->least_addr = mm;
  3183       if ((m->footprint += mmsize) > m->max_footprint)
  3184         m->max_footprint = m->footprint;
  3185       assert(is_aligned(chunk2mem(p)));
  3186       check_mmapped_chunk(m, p);
  3187       return chunk2mem(p);
  3188     }
  3189   }
  3190   return 0;
  3191 }
  3192 
  3193 /* Realloc using mmap */
  3194 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
  3195   size_t oldsize = chunksize(oldp);
  3196   if (is_small(nb)) /* Can't shrink mmap regions below small size */
  3197     return 0;
  3198   /* Keep old chunk if big enough but not too big */
  3199   if (oldsize >= nb + SIZE_T_SIZE &&
  3200       (oldsize - nb) <= (mparams.granularity << 1))
  3201     return oldp;
  3202   else {
  3203     size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
  3204     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
  3205     size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
  3206                                          CHUNK_ALIGN_MASK);
  3207     char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
  3208                                   oldmmsize, newmmsize, 1);
  3209     if (cp != CMFAIL) {
  3210       mchunkptr newp = (mchunkptr)(cp + offset);
  3211       size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
  3212       newp->head = (psize|CINUSE_BIT);
  3213       mark_inuse_foot(m, newp, psize);
  3214       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
  3215       chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
  3216 
  3217       if (cp < m->least_addr)
  3218         m->least_addr = cp;
  3219       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
  3220         m->max_footprint = m->footprint;
  3221       check_mmapped_chunk(m, newp);
  3222       return newp;
  3223     }
  3224   }
  3225   return 0;
  3226 }
  3227 
  3228 /* -------------------------- mspace management -------------------------- */
  3229 
  3230 /* Initialize top chunk and its size */
  3231 static void init_top(mstate m, mchunkptr p, size_t psize) {
  3232   /* Ensure alignment */
  3233   size_t offset = align_offset(chunk2mem(p));
  3234   p = (mchunkptr)((char*)p + offset);
  3235   psize -= offset;
  3236 
  3237   m->top = p;
  3238   m->topsize = psize;
  3239   p->head = psize | PINUSE_BIT;
  3240   /* set size of fake trailing chunk holding overhead space only once */
  3241   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  3242   m->trim_check = mparams.trim_threshold; /* reset on each update */
  3243 }
  3244 
  3245 /* Initialize bins for a new mstate that is otherwise zeroed out */
  3246 static void init_bins(mstate m) {
  3247   /* Establish circular links for smallbins */
  3248   bindex_t i;
  3249   for (i = 0; i < NSMALLBINS; ++i) {
  3250     sbinptr bin = smallbin_at(m,i);
  3251     bin->fd = bin->bk = bin;
  3252   }
  3253 }
  3254 
  3255 #if PROCEED_ON_ERROR
  3256 
  3257 /* default corruption action */
  3258 static void reset_on_error(mstate m) {
  3259   int i;
  3260   ++malloc_corruption_error_count;
  3261   /* Reinitialize fields to forget about all memory */
  3262   m->smallbins = m->treebins = 0;
  3263   m->dvsize = m->topsize = 0;
  3264   m->seg.base = 0;
  3265   m->seg.size = 0;
  3266   m->seg.next = 0;
  3267   m->top = m->dv = 0;
  3268   for (i = 0; i < NTREEBINS; ++i)
  3269     *treebin_at(m, i) = 0;
  3270   init_bins(m);
  3271 }
  3272 #endif /* PROCEED_ON_ERROR */
  3273 
  3274 /* Allocate chunk and prepend remainder with chunk in successor base. */
  3275 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
  3276                            size_t nb) {
  3277   mchunkptr p = align_as_chunk(newbase);
  3278   mchunkptr oldfirst = align_as_chunk(oldbase);
  3279   size_t psize = (char*)oldfirst - (char*)p;
  3280   mchunkptr q = chunk_plus_offset(p, nb);
  3281   size_t qsize = psize - nb;
  3282   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3283 
  3284   assert((char*)oldfirst > (char*)q);
  3285   assert(pinuse(oldfirst));
  3286   assert(qsize >= MIN_CHUNK_SIZE);
  3287 
  3288   /* consolidate remainder with first chunk of old base */
  3289   if (oldfirst == m->top) {
  3290     size_t tsize = m->topsize += qsize;
  3291     m->top = q;
  3292     q->head = tsize | PINUSE_BIT;
  3293     check_top_chunk(m, q);
  3294   }
  3295   else if (oldfirst == m->dv) {
  3296     size_t dsize = m->dvsize += qsize;
  3297     m->dv = q;
  3298     set_size_and_pinuse_of_free_chunk(q, dsize);
  3299   }
  3300   else {
  3301     if (!cinuse(oldfirst)) {
  3302       size_t nsize = chunksize(oldfirst);
  3303       unlink_chunk(m, oldfirst, nsize);
  3304       oldfirst = chunk_plus_offset(oldfirst, nsize);
  3305       qsize += nsize;
  3306     }
  3307     set_free_with_pinuse(q, qsize, oldfirst);
  3308     insert_chunk(m, q, qsize);
  3309     check_free_chunk(m, q);
  3310   }
  3311 
  3312   check_malloced_chunk(m, chunk2mem(p), nb);
  3313   return chunk2mem(p);
  3314 }
  3315 
  3316 
  3317 /* Add a segment to hold a new noncontiguous region */
  3318 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  3319   /* Determine locations and sizes of segment, fenceposts, old top */
  3320   char* old_top = (char*)m->top;
  3321   msegmentptr oldsp = segment_holding(m, old_top);
  3322   char* old_end = oldsp->base + oldsp->size;
  3323   size_t ssize = pad_request(sizeof(struct malloc_segment));
  3324   char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3325   size_t offset = align_offset(chunk2mem(rawsp));
  3326   char* asp = rawsp + offset;
  3327   char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  3328   mchunkptr sp = (mchunkptr)csp;
  3329   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  3330   mchunkptr tnext = chunk_plus_offset(sp, ssize);
  3331   mchunkptr p = tnext;
  3332   int nfences = 0;
  3333 
  3334   /* reset top to new space */
  3335   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3336 
  3337   /* Set up segment record */
  3338   assert(is_aligned(ss));
  3339   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  3340   *ss = m->seg; /* Push current record */
  3341   m->seg.base = tbase;
  3342   m->seg.size = tsize;
  3343   m->seg.sflags = mmapped;
  3344   m->seg.next = ss;
  3345 
  3346   /* Insert trailing fenceposts */
  3347   for (;;) {
  3348     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  3349     p->head = FENCEPOST_HEAD;
  3350     ++nfences;
  3351     if ((char*)(&(nextp->head)) < old_end)
  3352       p = nextp;
  3353     else
  3354       break;
  3355   }
  3356   assert(nfences >= 2);
  3357 
  3358   /* Insert the rest of old top into a bin as an ordinary free chunk */
  3359   if (csp != old_top) {
  3360     mchunkptr q = (mchunkptr)old_top;
  3361     size_t psize = csp - old_top;
  3362     mchunkptr tn = chunk_plus_offset(q, psize);
  3363     set_free_with_pinuse(q, psize, tn);
  3364     insert_chunk(m, q, psize);
  3365   }
  3366 
  3367   check_top_chunk(m, m->top);
  3368 }
  3369 
  3370 /* -------------------------- System allocation -------------------------- */
  3371 
  3372 /* Get memory from system using MORECORE or MMAP */
  3373 static void* sys_alloc(mstate m, size_t nb) {
  3374   char* tbase = CMFAIL;
  3375   size_t tsize = 0;
  3376   flag_t mmap_flag = 0;
  3377 
  3378   init_mparams();
  3379 
  3380   /* Directly map large chunks */
  3381   if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  3382     void* mem = mmap_alloc(m, nb);
  3383     if (mem != 0)
  3384       return mem;
  3385   }
  3386 
  3387   /*
  3388     Try getting memory in any of three ways (in most-preferred to
  3389     least-preferred order):
  3390     1. A call to MORECORE that can normally contiguously extend memory.
  3391        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
  3392        main space is mmapped or a previous contiguous call failed)
  3393     2. A call to MMAP new space (disabled if not HAVE_MMAP).
  3394        Note that under the default settings, if MORECORE is unable to
  3395        fulfill a request, and HAVE_MMAP is true, then mmap is
  3396        used as a noncontiguous system allocator. This is a useful backup
  3397        strategy for systems with holes in address spaces -- in this case
  3398        sbrk cannot contiguously expand the heap, but mmap may be able to
  3399        find space.
  3400     3. A call to MORECORE that cannot usually contiguously extend memory.
  3401        (disabled if not HAVE_MORECORE)
  3402   */
  3403 
  3404   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  3405     char* br = CMFAIL;
  3406     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
  3407     size_t asize = 0;
  3408     ACQUIRE_MORECORE_LOCK();
  3409 
  3410     if (ss == 0) {  /* First time through or recovery */
  3411       char* base = (char*)CALL_MORECORE(0);
  3412       if (base != CMFAIL) {
  3413         asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3414         /* Adjust to end on a page boundary */
  3415         if (!is_page_aligned(base))
  3416           asize += (page_align((size_t)base) - (size_t)base);
  3417         /* Can't call MORECORE if size is negative when treated as signed */
  3418         if (asize < HALF_MAX_SIZE_T &&
  3419             (br = (char*)(CALL_MORECORE(asize))) == base) {
  3420           tbase = base;
  3421           tsize = asize;
  3422         }
  3423       }
  3424     }
  3425     else {
  3426       /* Subtract out existing available top space from MORECORE request. */
  3427       asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
  3428       /* Use mem here only if it did continuously extend old space */
  3429       if (asize < HALF_MAX_SIZE_T &&
  3430           (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
  3431         tbase = br;
  3432         tsize = asize;
  3433       }
  3434     }
  3435 
  3436     if (tbase == CMFAIL) {    /* Cope with partial failure */
  3437       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
  3438         if (asize < HALF_MAX_SIZE_T &&
  3439             asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  3440           size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
  3441           if (esize < HALF_MAX_SIZE_T) {
  3442             char* end = (char*)CALL_MORECORE(esize);
  3443             if (end != CMFAIL)
  3444               asize += esize;
  3445             else {            /* Can't use; try to release */
  3446               end = (char*)CALL_MORECORE(-asize);
  3447               br = CMFAIL;
  3448             }
  3449           }
  3450         }
  3451       }
  3452       if (br != CMFAIL) {    /* Use the space we did get */
  3453         tbase = br;
  3454         tsize = asize;
  3455       }
  3456       else
  3457         disable_contiguous(m); /* Don't try contiguous path in the future */
  3458     }
  3459 
  3460     RELEASE_MORECORE_LOCK();
  3461   }
  3462 
  3463   if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
  3464     size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
  3465     size_t rsize = granularity_align(req);
  3466     if (rsize > nb) { /* Fail if wraps around zero */
  3467       char* mp = (char*)(CALL_MMAP(rsize));
  3468       if (mp != CMFAIL) {
  3469         tbase = mp;
  3470         tsize = rsize;
  3471         mmap_flag = IS_MMAPPED_BIT;
  3472       }
  3473     }
  3474   }
  3475 
  3476   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
  3477     size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3478     if (asize < HALF_MAX_SIZE_T) {
  3479       char* br = CMFAIL;
  3480       char* end = CMFAIL;
  3481       ACQUIRE_MORECORE_LOCK();
  3482       br = (char*)(CALL_MORECORE(asize));
  3483       end = (char*)(CALL_MORECORE(0));
  3484       RELEASE_MORECORE_LOCK();
  3485       if (br != CMFAIL && end != CMFAIL && br < end) {
  3486         size_t ssize = end - br;
  3487         if (ssize > nb + TOP_FOOT_SIZE) {
  3488           tbase = br;
  3489           tsize = ssize;
  3490         }
  3491       }
  3492     }
  3493   }
  3494 
  3495   if (tbase != CMFAIL) {
  3496 
  3497     if ((m->footprint += tsize) > m->max_footprint)
  3498       m->max_footprint = m->footprint;
  3499 
  3500     if (!is_initialized(m)) { /* first-time initialization */
  3501       m->seg.base = m->least_addr = tbase;
  3502       m->seg.size = tsize;
  3503       m->seg.sflags = mmap_flag;
  3504       m->magic = mparams.magic;
  3505       init_bins(m);
  3506       if (is_global(m)) 
  3507         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3508       else {
  3509         /* Offset top by embedded malloc_state */
  3510         mchunkptr mn = next_chunk(mem2chunk(m));
  3511         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
  3512       }
  3513     }
  3514 
  3515     else {
  3516       /* Try to merge with an existing segment */
  3517       msegmentptr sp = &m->seg;
  3518       while (sp != 0 && tbase != sp->base + sp->size)
  3519         sp = sp->next;
  3520       if (sp != 0 &&
  3521           !is_extern_segment(sp) &&
  3522           (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
  3523           segment_holds(sp, m->top)) { /* append */
  3524         sp->size += tsize;
  3525         init_top(m, m->top, m->topsize + tsize);
  3526       }
  3527       else {
  3528         if (tbase < m->least_addr)
  3529           m->least_addr = tbase;
  3530         sp = &m->seg;
  3531         while (sp != 0 && sp->base != tbase + tsize)
  3532           sp = sp->next;
  3533         if (sp != 0 &&
  3534             !is_extern_segment(sp) &&
  3535             (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
  3536           char* oldbase = sp->base;
  3537           sp->base = tbase;
  3538           sp->size += tsize;
  3539           return prepend_alloc(m, tbase, oldbase, nb);
  3540         }
  3541         else
  3542           add_segment(m, tbase, tsize, mmap_flag);
  3543       }
  3544     }
  3545 
  3546     if (nb < m->topsize) { /* Allocate from new or extended top space */
  3547       size_t rsize = m->topsize -= nb;
  3548       mchunkptr p = m->top;
  3549       mchunkptr r = m->top = chunk_plus_offset(p, nb);
  3550       r->head = rsize | PINUSE_BIT;
  3551       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3552       check_top_chunk(m, m->top);
  3553       check_malloced_chunk(m, chunk2mem(p), nb);
  3554       return chunk2mem(p);
  3555     }
  3556   }
  3557 
  3558   MALLOC_FAILURE_ACTION;
  3559   return 0;
  3560 }
  3561 
  3562 /* -----------------------  system deallocation -------------------------- */
  3563 
  3564 /* Unmap and unlink any mmapped segments that don't contain used chunks */
  3565 static size_t release_unused_segments(mstate m) {
  3566   size_t released = 0;
  3567   msegmentptr pred = &m->seg;
  3568   msegmentptr sp = pred->next;
  3569   while (sp != 0) {
  3570     char* base = sp->base;
  3571     size_t size = sp->size;
  3572     msegmentptr next = sp->next;
  3573     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
  3574       mchunkptr p = align_as_chunk(base);
  3575       size_t psize = chunksize(p);
  3576       /* Can unmap if first chunk holds entire segment and not pinned */
  3577       if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
  3578         tchunkptr tp = (tchunkptr)p;
  3579         assert(segment_holds(sp, (char*)sp));
  3580         if (p == m->dv) {
  3581           m->dv = 0;
  3582           m->dvsize = 0;
  3583         }
  3584         else {
  3585           unlink_large_chunk(m, tp);
  3586         }
  3587         if (CALL_MUNMAP(base, size) == 0) {
  3588           released += size;
  3589           m->footprint -= size;
  3590           /* unlink obsoleted record */
  3591           sp = pred;
  3592           sp->next = next;
  3593         }
  3594         else { /* back out if cannot unmap */
  3595           insert_large_chunk(m, tp, psize);
  3596         }
  3597       }
  3598     }
  3599     pred = sp;
  3600     sp = next;
  3601   }
  3602   return released;
  3603 }
  3604 
  3605 static int sys_trim(mstate m, size_t pad) {
  3606   size_t released = 0;
  3607   if (pad < MAX_REQUEST && is_initialized(m)) {
  3608     pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
  3609 
  3610     if (m->topsize > pad) {
  3611       /* Shrink top space in granularity-size units, keeping at least one */
  3612       size_t unit = mparams.granularity;
  3613       size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
  3614                       SIZE_T_ONE) * unit;
  3615       msegmentptr sp = segment_holding(m, (char*)m->top);
  3616 
  3617       if (!is_extern_segment(sp)) {
  3618         if (is_mmapped_segment(sp)) {
  3619           if (HAVE_MMAP &&
  3620               sp->size >= extra &&
  3621               !has_segment_link(m, sp)) { /* can't shrink if pinned */
  3622             size_t newsize = sp->size - extra;
  3623             /* Prefer mremap, fall back to munmap */
  3624             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
  3625                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
  3626               released = extra;
  3627             }
  3628           }
  3629         }
  3630         else if (HAVE_MORECORE) {
  3631           if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
  3632             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
  3633           ACQUIRE_MORECORE_LOCK();
  3634           {
  3635             /* Make sure end of memory is where we last set it. */
  3636             char* old_br = (char*)(CALL_MORECORE(0));
  3637             if (old_br == sp->base + sp->size) {
  3638               char* rel_br = (char*)(CALL_MORECORE(-extra));
  3639               char* new_br = (char*)(CALL_MORECORE(0));
  3640               if (rel_br != CMFAIL && new_br < old_br)
  3641                 released = old_br - new_br;
  3642             }
  3643           }
  3644           RELEASE_MORECORE_LOCK();
  3645         }
  3646       }
  3647 
  3648       if (released != 0) {
  3649         sp->size -= released;
  3650         m->footprint -= released;
  3651         init_top(m, m->top, m->topsize - released);
  3652         check_top_chunk(m, m->top);
  3653       }
  3654     }
  3655 
  3656     /* Unmap any unused mmapped segments */
  3657     if (HAVE_MMAP) 
  3658       released += release_unused_segments(m);
  3659 
  3660     /* On failure, disable autotrim to avoid repeated failed future calls */
  3661     if (released == 0)
  3662       m->trim_check = MAX_SIZE_T;
  3663   }
  3664 
  3665   return (released != 0)? 1 : 0;
  3666 }
  3667 
  3668 /* ---------------------------- malloc support --------------------------- */
  3669 
  3670 /* allocate a large request from the best fitting chunk in a treebin */
  3671 static void* tmalloc_large(mstate m, size_t nb) {
  3672   tchunkptr v = 0;
  3673   size_t rsize = -nb; /* Unsigned negation */
  3674   tchunkptr t;
  3675   bindex_t idx;
  3676   compute_tree_index(nb, idx);
  3677 
  3678   if ((t = *treebin_at(m, idx)) != 0) {
  3679     /* Traverse tree for this bin looking for node with size == nb */
  3680     size_t sizebits = nb << leftshift_for_tree_index(idx);
  3681     tchunkptr rst = 0;  /* The deepest untaken right subtree */
  3682     for (;;) {
  3683       tchunkptr rt;
  3684       size_t trem = chunksize(t) - nb;
  3685       if (trem < rsize) {
  3686         v = t;
  3687         if ((rsize = trem) == 0)
  3688           break;
  3689       }
  3690       rt = t->child[1];
  3691       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  3692       if (rt != 0 && rt != t)
  3693         rst = rt;
  3694       if (t == 0) {
  3695         t = rst; /* set t to least subtree holding sizes > nb */
  3696         break;
  3697       }
  3698       sizebits <<= 1;
  3699     }
  3700   }
  3701 
  3702   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
  3703     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
  3704     if (leftbits != 0) {
  3705       bindex_t i;
  3706       binmap_t leastbit = least_bit(leftbits);
  3707       compute_bit2idx(leastbit, i);
  3708       t = *treebin_at(m, i);
  3709     }
  3710   }
  3711 
  3712   while (t != 0) { /* find smallest of tree or subtree */
  3713     size_t trem = chunksize(t) - nb;
  3714     if (trem < rsize) {
  3715       rsize = trem;
  3716       v = t;
  3717     }
  3718     t = leftmost_child(t);
  3719   }
  3720 
  3721   /*  If dv is a better fit, return 0 so malloc will use it */
  3722   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
  3723     if (RTCHECK(ok_address(m, v))) { /* split */
  3724       mchunkptr r = chunk_plus_offset(v, nb);
  3725       assert(chunksize(v) == rsize + nb);
  3726       if (RTCHECK(ok_next(v, r))) {
  3727         unlink_large_chunk(m, v);
  3728         if (rsize < MIN_CHUNK_SIZE)
  3729           set_inuse_and_pinuse(m, v, (rsize + nb));
  3730         else {
  3731           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3732           set_size_and_pinuse_of_free_chunk(r, rsize);
  3733           insert_chunk(m, r, rsize);
  3734         }
  3735         return chunk2mem(v);
  3736       }
  3737     }
  3738     CORRUPTION_ERROR_ACTION(m);
  3739   }
  3740   return 0;
  3741 }
  3742 
  3743 /* allocate a small request from the best fitting chunk in a treebin */
  3744 static void* tmalloc_small(mstate m, size_t nb) {
  3745   tchunkptr t, v;
  3746   size_t rsize;
  3747   bindex_t i;
  3748   binmap_t leastbit = least_bit(m->treemap);
  3749   compute_bit2idx(leastbit, i);
  3750 
  3751   v = t = *treebin_at(m, i);
  3752   rsize = chunksize(t) - nb;
  3753 
  3754   while ((t = leftmost_child(t)) != 0) {
  3755     size_t trem = chunksize(t) - nb;
  3756     if (trem < rsize) {
  3757       rsize = trem;
  3758       v = t;
  3759     }
  3760   }
  3761 
  3762   if (RTCHECK(ok_address(m, v))) {
  3763     mchunkptr r = chunk_plus_offset(v, nb);
  3764     assert(chunksize(v) == rsize + nb);
  3765     if (RTCHECK(ok_next(v, r))) {
  3766       unlink_large_chunk(m, v);
  3767       if (rsize < MIN_CHUNK_SIZE)
  3768         set_inuse_and_pinuse(m, v, (rsize + nb));
  3769       else {
  3770         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  3771         set_size_and_pinuse_of_free_chunk(r, rsize);
  3772         replace_dv(m, r, rsize);
  3773       }
  3774       return chunk2mem(v);
  3775     }
  3776   }
  3777 
  3778   CORRUPTION_ERROR_ACTION(m);
  3779   return 0;
  3780 }
  3781 
  3782 /* --------------------------- realloc support --------------------------- */
  3783 
  3784 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
  3785   if (bytes >= MAX_REQUEST) {
  3786     MALLOC_FAILURE_ACTION;
  3787     return 0;
  3788   }
  3789   if (!PREACTION(m)) {
  3790     mchunkptr oldp = mem2chunk(oldmem);
  3791     size_t oldsize = chunksize(oldp);
  3792     mchunkptr next = chunk_plus_offset(oldp, oldsize);
  3793     mchunkptr newp = 0;
  3794     void* extra = 0;
  3795 
  3796     /* Try to either shrink or extend into top. Else malloc-copy-free */
  3797 
  3798     if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
  3799                 ok_next(oldp, next) && ok_pinuse(next))) {
  3800       size_t nb = request2size(bytes);
  3801       if (is_mmapped(oldp))
  3802         newp = mmap_resize(m, oldp, nb);
  3803       else if (oldsize >= nb) { /* already big enough */
  3804         size_t rsize = oldsize - nb;
  3805         newp = oldp;
  3806         if (rsize >= MIN_CHUNK_SIZE) {
  3807           mchunkptr remainder = chunk_plus_offset(newp, nb);
  3808           set_inuse(m, newp, nb);
  3809           set_inuse(m, remainder, rsize);
  3810           extra = chunk2mem(remainder);
  3811         }
  3812       }
  3813       else if (next == m->top && oldsize + m->topsize > nb) {
  3814         /* Expand into top */
  3815         size_t newsize = oldsize + m->topsize;
  3816         size_t newtopsize = newsize - nb;
  3817         mchunkptr newtop = chunk_plus_offset(oldp, nb);
  3818         set_inuse(m, oldp, nb);
  3819         newtop->head = newtopsize |PINUSE_BIT;
  3820         m->top = newtop;
  3821         m->topsize = newtopsize;
  3822         newp = oldp;
  3823       }
  3824     }
  3825     else {
  3826       USAGE_ERROR_ACTION(m, oldmem);
  3827       POSTACTION(m);
  3828       return 0;
  3829     }
  3830 
  3831     POSTACTION(m);
  3832 
  3833     if (newp != 0) {
  3834       if (extra != 0) {
  3835         internal_free(m, extra);
  3836       }
  3837       check_inuse_chunk(m, newp);
  3838       return chunk2mem(newp);
  3839     }
  3840     else {
  3841       void* newmem = internal_malloc(m, bytes);
  3842       if (newmem != 0) {
  3843         size_t oc = oldsize - overhead_for(oldp);
  3844         memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
  3845         internal_free(m, oldmem);
  3846       }
  3847       return newmem;
  3848     }
  3849   }
  3850   return 0;
  3851 }
  3852 
  3853 /* --------------------------- memalign support -------------------------- */
  3854 
  3855 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  3856   if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
  3857     return internal_malloc(m, bytes);
  3858   if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
  3859     alignment = MIN_CHUNK_SIZE;
  3860   if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
  3861     size_t a = MALLOC_ALIGNMENT << 1;
  3862     while (a < alignment) a <<= 1;
  3863     alignment = a;
  3864   }
  3865   
  3866   if (bytes >= MAX_REQUEST - alignment) {
  3867     if (m != 0)  { /* Test isn't needed but avoids compiler warning */
  3868       MALLOC_FAILURE_ACTION;
  3869     }
  3870   }
  3871   else {
  3872     size_t nb = request2size(bytes);
  3873     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
  3874     char* mem = (char*)internal_malloc(m, req);
  3875     if (mem != 0) {
  3876       void* leader = 0;
  3877       void* trailer = 0;
  3878       mchunkptr p = mem2chunk(mem);
  3879 
  3880       if (PREACTION(m)) return 0;
  3881       if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
  3882         /*
  3883           Find an aligned spot inside chunk.  Since we need to give
  3884           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
  3885           the first calculation places us at a spot with less than
  3886           MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
  3887           We've allocated enough total room so that this is always
  3888           possible.
  3889         */
  3890         char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
  3891                                                        alignment -
  3892                                                        SIZE_T_ONE)) &
  3893                                              -alignment));
  3894         char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
  3895           br : br+alignment;
  3896         mchunkptr newp = (mchunkptr)pos;
  3897         size_t leadsize = pos - (char*)(p);
  3898         size_t newsize = chunksize(p) - leadsize;
  3899 
  3900         if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
  3901           newp->prev_foot = p->prev_foot + leadsize;
  3902           newp->head = (newsize|CINUSE_BIT);
  3903         }
  3904         else { /* Otherwise, give back leader, use the rest */
  3905           set_inuse(m, newp, newsize);
  3906           set_inuse(m, p, leadsize);
  3907           leader = chunk2mem(p);
  3908         }
  3909         p = newp;
  3910       }
  3911 
  3912       /* Give back spare room at the end */
  3913       if (!is_mmapped(p)) {
  3914         size_t size = chunksize(p);
  3915         if (size > nb + MIN_CHUNK_SIZE) {
  3916           size_t remainder_size = size - nb;
  3917           mchunkptr remainder = chunk_plus_offset(p, nb);
  3918           set_inuse(m, p, nb);
  3919           set_inuse(m, remainder, remainder_size);
  3920           trailer = chunk2mem(remainder);
  3921         }
  3922       }
  3923 
  3924       assert (chunksize(p) >= nb);
  3925       assert((((size_t)(chunk2mem(p))) % alignment) == 0);
  3926       check_inuse_chunk(m, p);
  3927       POSTACTION(m);
  3928       if (leader != 0) {
  3929         internal_free(m, leader);
  3930       }
  3931       if (trailer != 0) {
  3932         internal_free(m, trailer);
  3933       }
  3934       return chunk2mem(p);
  3935     }
  3936   }
  3937   return 0;
  3938 }
  3939 
  3940 /* ------------------------ comalloc/coalloc support --------------------- */
  3941 
  3942 static void** ialloc(mstate m,
  3943                      size_t n_elements,
  3944                      size_t* sizes,
  3945                      int opts,
  3946                      void* chunks[]) {
  3947   /*
  3948     This provides common support for independent_X routines, handling
  3949     all of the combinations that can result.
  3950 
  3951     The opts arg has:
  3952     bit 0 set if all elements are same size (using sizes[0])
  3953     bit 1 set if elements should be zeroed
  3954   */
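         /* dlindependent_calloc passes opts == 3 (same size, zeroed);
            dlindependent_comalloc passes opts == 0 (per-element sizes,
            not zeroed).  The mspace_ variants pass the same values. */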
  3955 
  3956   size_t    element_size;   /* chunksize of each element, if all same */
  3957   size_t    contents_size;  /* total size of elements */
  3958   size_t    array_size;     /* request size of pointer array */
  3959   void*     mem;            /* malloced aggregate space */
  3960   mchunkptr p;              /* corresponding chunk */
  3961   size_t    remainder_size; /* remaining bytes while splitting */
  3962   void**    marray;         /* either "chunks" or malloced ptr array */
  3963   mchunkptr array_chunk;    /* chunk for malloced ptr array */
  3964   flag_t    was_enabled;    /* to disable mmap */
  3965   size_t    size;
  3966   size_t    i;
  3967 
  3968   /* compute array length, if needed */
  3969   if (chunks != 0) {
  3970     if (n_elements == 0)
  3971       return chunks; /* nothing to do */
  3972     marray = chunks;
  3973     array_size = 0;
  3974   }
  3975   else {
  3976     /* if empty req, must still return chunk representing empty array */
  3977     if (n_elements == 0)
  3978       return (void**)internal_malloc(m, 0);
  3979     marray = 0;
  3980     array_size = request2size(n_elements * (sizeof(void*)));
  3981   }
  3982 
  3983   /* compute total element size */
  3984   if (opts & 0x1) { /* all-same-size */
  3985     element_size = request2size(*sizes);
  3986     contents_size = n_elements * element_size;
  3987   }
  3988   else { /* add up all the sizes */
  3989     element_size = 0;
  3990     contents_size = 0;
  3991     for (i = 0; i != n_elements; ++i)
  3992       contents_size += request2size(sizes[i]);
  3993   }
  3994 
  3995   size = contents_size + array_size;
  3996 
  3997   /*
  3998      Allocate the aggregate chunk.  First disable direct-mmapping so
  3999      malloc won't use it, since we would not be able to later
  4000      free/realloc space internal to a segregated mmap region.
  4001   */
  4002   was_enabled = use_mmap(m);
  4003   disable_mmap(m);
  4004   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  4005   if (was_enabled)
  4006     enable_mmap(m);
  4007   if (mem == 0)
  4008     return 0;
  4009 
  4010   if (PREACTION(m)) return 0;
  4011   p = mem2chunk(mem);
  4012   remainder_size = chunksize(p);
  4013 
  4014   assert(!is_mmapped(p));
  4015 
  4016   if (opts & 0x2) {       /* optionally clear the elements */
  4017     memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  4018   }
  4019 
  4020   /* If not provided, allocate the pointer array as final part of chunk */
  4021   if (marray == 0) {
  4022     size_t  array_chunk_size;
  4023     array_chunk = chunk_plus_offset(p, contents_size);
  4024     array_chunk_size = remainder_size - contents_size;
  4025     marray = (void**) (chunk2mem(array_chunk));
  4026     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
  4027     remainder_size = contents_size;
  4028   }
  4029 
  4030   /* split out elements */
  4031   for (i = 0; ; ++i) {
  4032     marray[i] = chunk2mem(p);
  4033     if (i != n_elements-1) {
  4034       if (element_size != 0)
  4035         size = element_size;
  4036       else
  4037         size = request2size(sizes[i]);
  4038       remainder_size -= size;
  4039       set_size_and_pinuse_of_inuse_chunk(m, p, size);
  4040       p = chunk_plus_offset(p, size);
  4041     }
  4042     else { /* the final element absorbs any overallocation slop */
  4043       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
  4044       break;
  4045     }
  4046   }
  4047 
  4048 #if DEBUG
  4049   if (marray != chunks) {
  4050     /* final element must have exactly exhausted chunk */
  4051     if (element_size != 0) {
  4052       assert(remainder_size == element_size);
  4053     }
  4054     else {
  4055       assert(remainder_size == request2size(sizes[i]));
  4056     }
  4057     check_inuse_chunk(m, mem2chunk(marray));
  4058   }
  4059   for (i = 0; i != n_elements; ++i)
  4060     check_inuse_chunk(m, mem2chunk(marray[i]));
  4061 
  4062 #endif /* DEBUG */
  4063 
  4064   POSTACTION(m);
  4065   return marray;
  4066 }
  4067 
  4068 
  4069 /* -------------------------- public routines ---------------------------- */
  4070 
  4071 #if !ONLY_MSPACES
  4072 
  4073 void* dlmalloc(size_t bytes) {
  4074   /*
  4075      Basic algorithm:
  4076      If a small request (< 256 bytes minus per-chunk overhead):
  4077        1. If one exists, use a remainderless chunk in associated smallbin.
  4078           (Remainderless means that there are too few excess bytes to
  4079           represent as a chunk.)
  4080        2. If it is big enough, use the dv chunk, which is normally the
  4081           chunk adjacent to the one used for the most recent small request.
  4082        3. If one exists, split the smallest available chunk in a bin,
  4083           saving remainder in dv.
  4084        4. If it is big enough, use the top chunk.
  4085        5. If available, get memory from system and use it
  4086      Otherwise, for a large request:
  4087        1. Find the smallest available binned chunk that fits, and use it
  4088           if it is better fitting than dv chunk, splitting if necessary.
  4089        2. If better fitting than any binned chunk, use the dv chunk.
  4090        3. If it is big enough, use the top chunk.
  4091        4. If request size >= mmap threshold, try to directly mmap this chunk.
  4092        5. If available, get memory from system and use it
  4093 
  4094      The ugly goto's here ensure that postaction occurs along all paths.
  4095   */
  4096 
  4097   if (!PREACTION(gm)) {
  4098     void* mem;
  4099     size_t nb;
  4100     if (bytes <= MAX_SMALL_REQUEST) {
  4101       bindex_t idx;
  4102       binmap_t smallbits;
  4103       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4104       idx = small_index(nb);
  4105       smallbits = gm->smallmap >> idx;
  4106 
  4107       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4108         mchunkptr b, p;
  4109         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4110         b = smallbin_at(gm, idx);
  4111         p = b->fd;
  4112         assert(chunksize(p) == small_index2size(idx));
  4113         unlink_first_small_chunk(gm, b, p, idx);
  4114         set_inuse_and_pinuse(gm, p, small_index2size(idx));
  4115         mem = chunk2mem(p);
  4116         check_malloced_chunk(gm, mem, nb);
  4117         goto postaction;
  4118       }
  4119 
  4120       else if (nb > gm->dvsize) {
  4121         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4122           mchunkptr b, p, r;
  4123           size_t rsize;
  4124           bindex_t i;
  4125           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4126           binmap_t leastbit = least_bit(leftbits);
  4127           compute_bit2idx(leastbit, i);
  4128           b = smallbin_at(gm, i);
  4129           p = b->fd;
  4130           assert(chunksize(p) == small_index2size(i));
  4131           unlink_first_small_chunk(gm, b, p, i);
  4132           rsize = small_index2size(i) - nb;
  4133           /* Fit here cannot be remainderless if 4-byte sizes */
  4134           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4135             set_inuse_and_pinuse(gm, p, small_index2size(i));
  4136           else {
  4137             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4138             r = chunk_plus_offset(p, nb);
  4139             set_size_and_pinuse_of_free_chunk(r, rsize);
  4140             replace_dv(gm, r, rsize);
  4141           }
  4142           mem = chunk2mem(p);
  4143           check_malloced_chunk(gm, mem, nb);
  4144           goto postaction;
  4145         }
  4146 
  4147         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
  4148           check_malloced_chunk(gm, mem, nb);
  4149           goto postaction;
  4150         }
  4151       }
  4152     }
  4153     else if (bytes >= MAX_REQUEST)
  4154       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  4155     else {
  4156       nb = pad_request(bytes);
  4157       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  4158         check_malloced_chunk(gm, mem, nb);
  4159         goto postaction;
  4160       }
  4161     }
  4162 
  4163     if (nb <= gm->dvsize) {
  4164       size_t rsize = gm->dvsize - nb;
  4165       mchunkptr p = gm->dv;
  4166       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  4167         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
  4168         gm->dvsize = rsize;
  4169         set_size_and_pinuse_of_free_chunk(r, rsize);
  4170         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4171       }
  4172       else { /* exhaust dv */
  4173         size_t dvs = gm->dvsize;
  4174         gm->dvsize = 0;
  4175         gm->dv = 0;
  4176         set_inuse_and_pinuse(gm, p, dvs);
  4177       }
  4178       mem = chunk2mem(p);
  4179       check_malloced_chunk(gm, mem, nb);
  4180       goto postaction;
  4181     }
  4182 
  4183     else if (nb < gm->topsize) { /* Split top */
  4184       size_t rsize = gm->topsize -= nb;
  4185       mchunkptr p = gm->top;
  4186       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  4187       r->head = rsize | PINUSE_BIT;
  4188       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4189       mem = chunk2mem(p);
  4190       check_top_chunk(gm, gm->top);
  4191       check_malloced_chunk(gm, mem, nb);
  4192       goto postaction;
  4193     }
  4194 
  4195     mem = sys_alloc(gm, nb);
  4196 
  4197   postaction:
  4198     POSTACTION(gm);
  4199     return mem;
  4200   }
  4201 
  4202   return 0;
  4203 }
  4204 
  4205 void dlfree(void* mem) {
  4206   /*
  4207      Consolidate freed chunks with preceding or succeeding bordering
  4208      free chunks, if they exist, and then place in a bin.  Intermixed
  4209      with special cases for top, dv, mmapped chunks, and usage errors.
  4210   */
  4211 
  4212   if (mem != 0) {
  4213     mchunkptr p  = mem2chunk(mem);
  4214 #if FOOTERS
  4215     mstate fm = get_mstate_for(p);
  4216     if (!ok_magic(fm)) {
  4217       USAGE_ERROR_ACTION(fm, p);
  4218       return;
  4219     }
  4220 #else /* FOOTERS */
  4221 #define fm gm
  4222 #endif /* FOOTERS */
  4223     if (!PREACTION(fm)) {
  4224       check_inuse_chunk(fm, p);
  4225       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4226         size_t psize = chunksize(p);
  4227         mchunkptr next = chunk_plus_offset(p, psize);
  4228         if (!pinuse(p)) {
  4229           size_t prevsize = p->prev_foot;
  4230           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4231             prevsize &= ~IS_MMAPPED_BIT;
  4232             psize += prevsize + MMAP_FOOT_PAD;
  4233             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4234               fm->footprint -= psize;
  4235             goto postaction;
  4236           }
  4237           else {
  4238             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4239             psize += prevsize;
  4240             p = prev;
  4241             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4242               if (p != fm->dv) {
  4243                 unlink_chunk(fm, p, prevsize);
  4244               }
  4245               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4246                 fm->dvsize = psize;
  4247                 set_free_with_pinuse(p, psize, next);
  4248                 goto postaction;
  4249               }
  4250             }
  4251             else
  4252               goto erroraction;
  4253           }
  4254         }
  4255 
  4256         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4257           if (!cinuse(next)) {  /* consolidate forward */
  4258             if (next == fm->top) {
  4259               size_t tsize = fm->topsize += psize;
  4260               fm->top = p;
  4261               p->head = tsize | PINUSE_BIT;
  4262               if (p == fm->dv) {
  4263                 fm->dv = 0;
  4264                 fm->dvsize = 0;
  4265               }
  4266               if (should_trim(fm, tsize))
  4267                 sys_trim(fm, 0);
  4268               goto postaction;
  4269             }
  4270             else if (next == fm->dv) {
  4271               size_t dsize = fm->dvsize += psize;
  4272               fm->dv = p;
  4273               set_size_and_pinuse_of_free_chunk(p, dsize);
  4274               goto postaction;
  4275             }
  4276             else {
  4277               size_t nsize = chunksize(next);
  4278               psize += nsize;
  4279               unlink_chunk(fm, next, nsize);
  4280               set_size_and_pinuse_of_free_chunk(p, psize);
  4281               if (p == fm->dv) {
  4282                 fm->dvsize = psize;
  4283                 goto postaction;
  4284               }
  4285             }
  4286           }
  4287           else
  4288             set_free_with_pinuse(p, psize, next);
  4289           insert_chunk(fm, p, psize);
  4290           check_free_chunk(fm, p);
  4291           goto postaction;
  4292         }
  4293       }
  4294     erroraction:
  4295       USAGE_ERROR_ACTION(fm, p);
  4296     postaction:
  4297       POSTACTION(fm);
  4298     }
  4299   }
  4300 #if !FOOTERS
  4301 #undef fm
  4302 #endif /* FOOTERS */
  4303 }
  4304 
  4305 void* dlcalloc(size_t n_elements, size_t elem_size) {
  4306   void* mem;
  4307   size_t req = 0;
  4308   if (n_elements != 0) {
  4309     req = n_elements * elem_size;
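           /* Only run the division-based overflow test when at least one
              operand needs more than 16 bits; two 16-bit values cannot
              overflow size_t. */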
  4310     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4311         (req / n_elements != elem_size))
  4312       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4313   }
  4314   mem = dlmalloc(req);
  4315   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4316     memset(mem, 0, req);
  4317   return mem;
  4318 }
  4319 
  4320 void* dlrealloc(void* oldmem, size_t bytes) {
  4321   if (oldmem == 0)
  4322     return dlmalloc(bytes);
  4323 #ifdef REALLOC_ZERO_BYTES_FREES
  4324   if (bytes == 0) {
  4325     dlfree(oldmem);
  4326     return 0;
  4327   }
  4328 #endif /* REALLOC_ZERO_BYTES_FREES */
  4329   else {
  4330 #if ! FOOTERS
  4331     mstate m = gm;
  4332 #else /* FOOTERS */
  4333     mstate m = get_mstate_for(mem2chunk(oldmem));
  4334     if (!ok_magic(m)) {
  4335       USAGE_ERROR_ACTION(m, oldmem);
  4336       return 0;
  4337     }
  4338 #endif /* FOOTERS */
  4339     return internal_realloc(m, oldmem, bytes);
  4340   }
  4341 }
  4342 
  4343 void* dlmemalign(size_t alignment, size_t bytes) {
  4344   return internal_memalign(gm, alignment, bytes);
  4345 }
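
       /*
         Illustrative use of dlmemalign (a sketch, not part of the original
         sources): request a block whose address is a multiple of a given
         power-of-two alignment.

           void* p = dlmemalign(64, 1000);
           ...
           dlfree(p);

         Alignments that are not powers of two are rounded up by
         internal_memalign above.
       */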
  4346 
  4347 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
  4348                                  void* chunks[]) {
  4349   size_t sz = elem_size; /* serves as 1-element array */
  4350   return ialloc(gm, n_elements, &sz, 3, chunks);
  4351 }
  4352 
  4353 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
  4354                                    void* chunks[]) {
  4355   return ialloc(gm, n_elements, sizes, 0, chunks);
  4356 }
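
       /*
         Illustrative use of dlindependent_comalloc (a sketch, not part of
         the original sources; the struct and sizes are assumptions):
         allocate a header and a data buffer together, while keeping each
         piece individually freeable.

           struct header { int n; };
           size_t sizes[2] = { sizeof(struct header), 1024 };
           void*  parts[2];
           if (dlindependent_comalloc(2, sizes, parts) != 0) {
             struct header* h = (struct header*) parts[0];
             char*          d = (char*) parts[1];
             ...
             dlfree(h);  // each element may be freed on its own
             dlfree(d);
           }
       */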
  4357 
  4358 void* dlvalloc(size_t bytes) {
  4359   size_t pagesz;
  4360   init_mparams();
  4361   pagesz = mparams.page_size;
  4362   return dlmemalign(pagesz, bytes);
  4363 }
  4364 
  4365 void* dlpvalloc(size_t bytes) {
  4366   size_t pagesz;
  4367   init_mparams();
  4368   pagesz = mparams.page_size;
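         /* Round the request up to a whole number of pages; with 4096-byte
            pages, for example, a 5000-byte request becomes 8192
            (illustrative figures). */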
  4369   return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
  4370 }
  4371 
  4372 int dlmalloc_trim(size_t pad) {
  4373   int result = 0;
  4374   if (!PREACTION(gm)) {
  4375     result = sys_trim(gm, pad);
  4376     POSTACTION(gm);
  4377   }
  4378   return result;
  4379 }
  4380 
  4381 size_t dlmalloc_footprint(void) {
  4382   return gm->footprint;
  4383 }
  4384 
  4385 size_t dlmalloc_max_footprint(void) {
  4386   return gm->max_footprint;
  4387 }
  4388 
  4389 #if !NO_MALLINFO
  4390 struct mallinfo dlmallinfo(void) {
  4391   return internal_mallinfo(gm);
  4392 }
  4393 #endif /* NO_MALLINFO */
  4394 
  4395 void dlmalloc_stats() {
  4396   internal_malloc_stats(gm);
  4397 }
  4398 
  4399 size_t dlmalloc_usable_size(void* mem) {
  4400   if (mem != 0) {
  4401     mchunkptr p = mem2chunk(mem);
  4402     if (cinuse(p))
  4403       return chunksize(p) - overhead_for(p);
  4404   }
  4405   return 0;
  4406 }
  4407 
  4408 int dlmallopt(int param_number, int value) {
  4409   return change_mparam(param_number, value);
  4410 }
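
       /*
         Illustrative use of dlmallopt (a sketch, not part of the original
         sources), using the M_TRIM_THRESHOLD parameter defined earlier in
         this file: raise the trim threshold so memory is returned to the
         system less eagerly.

           dlmallopt(M_TRIM_THRESHOLD, 1024 * 1024);
       */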
  4411 
  4412 #endif /* !ONLY_MSPACES */
  4413 
  4414 /* ----------------------------- user mspaces ---------------------------- */
  4415 
  4416 #if MSPACES
  4417 
  4418 static mstate init_user_mstate(char* tbase, size_t tsize) {
  4419   size_t msize = pad_request(sizeof(struct malloc_state));
  4420   mchunkptr mn;
  4421   mchunkptr msp = align_as_chunk(tbase);
  4422   mstate m = (mstate)(chunk2mem(msp));
  4423   memset(m, 0, msize);
  4424   INITIAL_LOCK(&m->mutex);
  4425   msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
  4426   m->seg.base = m->least_addr = tbase;
  4427   m->seg.size = m->footprint = m->max_footprint = tsize;
  4428   m->magic = mparams.magic;
  4429   m->mflags = mparams.default_mflags;
  4430   disable_contiguous(m);
  4431   init_bins(m);
  4432   mn = next_chunk(mem2chunk(m));
  4433   init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  4434   check_top_chunk(m, m->top);
  4435   return m;
  4436 }
  4437 
  4438 mspace create_mspace(size_t capacity, int locked) {
  4439   mstate m = 0;
  4440   size_t msize = pad_request(sizeof(struct malloc_state));
  4441   init_mparams(); /* Ensure pagesize etc initialized */
  4442 
  4443   if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4444     size_t rs = ((capacity == 0)? mparams.granularity :
  4445                  (capacity + TOP_FOOT_SIZE + msize));
  4446     size_t tsize = granularity_align(rs);
  4447     char* tbase = (char*)(CALL_MMAP(tsize));
  4448     if (tbase != CMFAIL) {
  4449       m = init_user_mstate(tbase, tsize);
  4450       m->seg.sflags = IS_MMAPPED_BIT;
  4451       set_lock(m, locked);
  4452     }
  4453   }
  4454   return (mspace)m;
  4455 }
  4456 
  4457 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  4458   mstate m = 0;
  4459   size_t msize = pad_request(sizeof(struct malloc_state));
  4460   init_mparams(); /* Ensure pagesize etc initialized */
  4461 
  4462   if (capacity > msize + TOP_FOOT_SIZE &&
  4463       capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4464     m = init_user_mstate((char*)base, capacity);
  4465     m->seg.sflags = EXTERN_BIT;
  4466     set_lock(m, locked);
  4467   }
  4468   return (mspace)m;
  4469 }
  4470 
  4471 size_t destroy_mspace(mspace msp) {
  4472   size_t freed = 0;
  4473   mstate ms = (mstate)msp;
  4474   if (ok_magic(ms)) {
  4475     msegmentptr sp = &ms->seg;
  4476     while (sp != 0) {
  4477       char* base = sp->base;
  4478       size_t size = sp->size;
  4479       flag_t flag = sp->sflags;
  4480       sp = sp->next;
  4481       if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
  4482           CALL_MUNMAP(base, size) == 0)
  4483         freed += size;
  4484     }
  4485   }
  4486   else {
  4487     USAGE_ERROR_ACTION(ms,ms);
  4488   }
  4489   return freed;
  4490 }
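
       /*
         Illustrative mspace use (a sketch, not part of the original sources;
         the buffer size and names are assumptions): carve a private heap out
         of a caller-supplied buffer.

           static char arena[256 * 1024];
           mspace msp = create_mspace_with_base(arena, sizeof(arena), 0);
           if (msp != 0) {
             void* p = mspace_malloc(msp, 128);
             ...
             mspace_free(msp, p);
             destroy_mspace(msp);  // releases only mmapped segments; the
                                   // caller still owns the arena
           }
       */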
  4491 
  4492 /*
  4493   mspace versions of routines are near-clones of the global
  4494   versions. This is not so nice but better than the alternatives.
  4495 */
  4496 
  4497 
  4498 void* mspace_malloc(mspace msp, size_t bytes) {
  4499   mstate ms = (mstate)msp;
  4500   if (!ok_magic(ms)) {
  4501     USAGE_ERROR_ACTION(ms,ms);
  4502     return 0;
  4503   }
  4504   if (!PREACTION(ms)) {
  4505     void* mem;
  4506     size_t nb;
  4507     if (bytes <= MAX_SMALL_REQUEST) {
  4508       bindex_t idx;
  4509       binmap_t smallbits;
  4510       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4511       idx = small_index(nb);
  4512       smallbits = ms->smallmap >> idx;
  4513 
  4514       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4515         mchunkptr b, p;
  4516         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4517         b = smallbin_at(ms, idx);
  4518         p = b->fd;
  4519         assert(chunksize(p) == small_index2size(idx));
  4520         unlink_first_small_chunk(ms, b, p, idx);
  4521         set_inuse_and_pinuse(ms, p, small_index2size(idx));
  4522         mem = chunk2mem(p);
  4523         check_malloced_chunk(ms, mem, nb);
  4524         goto postaction;
  4525       }
  4526 
  4527       else if (nb > ms->dvsize) {
  4528         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4529           mchunkptr b, p, r;
  4530           size_t rsize;
  4531           bindex_t i;
  4532           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4533           binmap_t leastbit = least_bit(leftbits);
  4534           compute_bit2idx(leastbit, i);
  4535           b = smallbin_at(ms, i);
  4536           p = b->fd;
  4537           assert(chunksize(p) == small_index2size(i));
  4538           unlink_first_small_chunk(ms, b, p, i);
  4539           rsize = small_index2size(i) - nb;
  4540           /* Fit here cannot be remainderless if 4-byte sizes */
  4541           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4542             set_inuse_and_pinuse(ms, p, small_index2size(i));
  4543           else {
  4544             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4545             r = chunk_plus_offset(p, nb);
  4546             set_size_and_pinuse_of_free_chunk(r, rsize);
  4547             replace_dv(ms, r, rsize);
  4548           }
  4549           mem = chunk2mem(p);
  4550           check_malloced_chunk(ms, mem, nb);
  4551           goto postaction;
  4552         }
  4553 
  4554         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
  4555           check_malloced_chunk(ms, mem, nb);
  4556           goto postaction;
  4557         }
  4558       }
  4559     }
  4560     else if (bytes >= MAX_REQUEST)
  4561       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  4562     else {
  4563       nb = pad_request(bytes);
  4564       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
  4565         check_malloced_chunk(ms, mem, nb);
  4566         goto postaction;
  4567       }
  4568     }
  4569 
  4570     if (nb <= ms->dvsize) {
  4571       size_t rsize = ms->dvsize - nb;
  4572       mchunkptr p = ms->dv;
  4573       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  4574         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
  4575         ms->dvsize = rsize;
  4576         set_size_and_pinuse_of_free_chunk(r, rsize);
  4577         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4578       }
  4579       else { /* exhaust dv */
  4580         size_t dvs = ms->dvsize;
  4581         ms->dvsize = 0;
  4582         ms->dv = 0;
  4583         set_inuse_and_pinuse(ms, p, dvs);
  4584       }
  4585       mem = chunk2mem(p);
  4586       check_malloced_chunk(ms, mem, nb);
  4587       goto postaction;
  4588     }
  4589 
  4590     else if (nb < ms->topsize) { /* Split top */
  4591       size_t rsize = ms->topsize -= nb;
  4592       mchunkptr p = ms->top;
  4593       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
  4594       r->head = rsize | PINUSE_BIT;
  4595       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  4596       mem = chunk2mem(p);
  4597       check_top_chunk(ms, ms->top);
  4598       check_malloced_chunk(ms, mem, nb);
  4599       goto postaction;
  4600     }
  4601 
  4602     mem = sys_alloc(ms, nb);
  4603 
  4604   postaction:
  4605     POSTACTION(ms);
  4606     return mem;
  4607   }
  4608 
  4609   return 0;
  4610 }
  4611 
  4612 void mspace_free(mspace msp, void* mem) {
  4613   if (mem != 0) {
  4614     mchunkptr p  = mem2chunk(mem);
  4615 #if FOOTERS
  4616     mstate fm = get_mstate_for(p);
  4617 #else /* FOOTERS */
  4618     mstate fm = (mstate)msp;
  4619 #endif /* FOOTERS */
  4620     if (!ok_magic(fm)) {
  4621       USAGE_ERROR_ACTION(fm, p);
  4622       return;
  4623     }
  4624     if (!PREACTION(fm)) {
  4625       check_inuse_chunk(fm, p);
  4626       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4627         size_t psize = chunksize(p);
  4628         mchunkptr next = chunk_plus_offset(p, psize);
  4629         if (!pinuse(p)) {
  4630           size_t prevsize = p->prev_foot;
  4631           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4632             prevsize &= ~IS_MMAPPED_BIT;
  4633             psize += prevsize + MMAP_FOOT_PAD;
  4634             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4635               fm->footprint -= psize;
  4636             goto postaction;
  4637           }
  4638           else {
  4639             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4640             psize += prevsize;
  4641             p = prev;
  4642             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4643               if (p != fm->dv) {
  4644                 unlink_chunk(fm, p, prevsize);
  4645               }
  4646               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4647                 fm->dvsize = psize;
  4648                 set_free_with_pinuse(p, psize, next);
  4649                 goto postaction;
  4650               }
  4651             }
  4652             else
  4653               goto erroraction;
  4654           }
  4655         }
  4656 
  4657         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  4658           if (!cinuse(next)) {  /* consolidate forward */
  4659             if (next == fm->top) {
  4660               size_t tsize = fm->topsize += psize;
  4661               fm->top = p;
  4662               p->head = tsize | PINUSE_BIT;
  4663               if (p == fm->dv) {
  4664                 fm->dv = 0;
  4665                 fm->dvsize = 0;
  4666               }
  4667               if (should_trim(fm, tsize))
  4668                 sys_trim(fm, 0);
  4669               goto postaction;
  4670             }
  4671             else if (next == fm->dv) {
  4672               size_t dsize = fm->dvsize += psize;
  4673               fm->dv = p;
  4674               set_size_and_pinuse_of_free_chunk(p, dsize);
  4675               goto postaction;
  4676             }
  4677             else {
  4678               size_t nsize = chunksize(next);
  4679               psize += nsize;
  4680               unlink_chunk(fm, next, nsize);
  4681               set_size_and_pinuse_of_free_chunk(p, psize);
  4682               if (p == fm->dv) {
  4683                 fm->dvsize = psize;
  4684                 goto postaction;
  4685               }
  4686             }
  4687           }
  4688           else
  4689             set_free_with_pinuse(p, psize, next);
  4690           insert_chunk(fm, p, psize);
  4691           check_free_chunk(fm, p);
  4692           goto postaction;
  4693         }
  4694       }
  4695     erroraction:
  4696       USAGE_ERROR_ACTION(fm, p);
  4697     postaction:
  4698       POSTACTION(fm);
  4699     }
  4700   }
  4701 }
  4702 
  4703 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  4704   void* mem;
  4705   size_t req = 0;
  4706   mstate ms = (mstate)msp;
  4707   if (!ok_magic(ms)) {
  4708     USAGE_ERROR_ACTION(ms,ms);
  4709     return 0;
  4710   }
  4711   if (n_elements != 0) {
  4712     req = n_elements * elem_size;
  4713     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4714         (req / n_elements != elem_size))
  4715       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4716   }
  4717   mem = internal_malloc(ms, req);
  4718   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4719     memset(mem, 0, req);
  4720   return mem;
  4721 }
  4722 
  4723 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  4724   if (oldmem == 0)
  4725     return mspace_malloc(msp, bytes);
  4726 #ifdef REALLOC_ZERO_BYTES_FREES
  4727   if (bytes == 0) {
  4728     mspace_free(msp, oldmem);
  4729     return 0;
  4730   }
  4731 #endif /* REALLOC_ZERO_BYTES_FREES */
  4732   else {
  4733 #if FOOTERS
  4734     mchunkptr p  = mem2chunk(oldmem);
  4735     mstate ms = get_mstate_for(p);
  4736 #else /* FOOTERS */
  4737     mstate ms = (mstate)msp;
  4738 #endif /* FOOTERS */
  4739     if (!ok_magic(ms)) {
  4740       USAGE_ERROR_ACTION(ms,ms);
  4741       return 0;
  4742     }
  4743     return internal_realloc(ms, oldmem, bytes);
  4744   }
  4745 }
  4746 
  4747 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  4748   mstate ms = (mstate)msp;
  4749   if (!ok_magic(ms)) {
  4750     USAGE_ERROR_ACTION(ms,ms);
  4751     return 0;
  4752   }
  4753   return internal_memalign(ms, alignment, bytes);
  4754 }
  4755 
  4756 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  4757                                  size_t elem_size, void* chunks[]) {
  4758   size_t sz = elem_size; /* serves as 1-element array */
  4759   mstate ms = (mstate)msp;
  4760   if (!ok_magic(ms)) {
  4761     USAGE_ERROR_ACTION(ms,ms);
  4762     return 0;
  4763   }
  4764   return ialloc(ms, n_elements, &sz, 3, chunks);
  4765 }
  4766 
  4767 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  4768                                    size_t sizes[], void* chunks[]) {
  4769   mstate ms = (mstate)msp;
  4770   if (!ok_magic(ms)) {
  4771     USAGE_ERROR_ACTION(ms,ms);
  4772     return 0;
  4773   }
  4774   return ialloc(ms, n_elements, sizes, 0, chunks);
  4775 }
  4776 
  4777 int mspace_trim(mspace msp, size_t pad) {
  4778   int result = 0;
  4779   mstate ms = (mstate)msp;
  4780   if (ok_magic(ms)) {
  4781     if (!PREACTION(ms)) {
  4782       result = sys_trim(ms, pad);
  4783       POSTACTION(ms);
  4784     }
  4785   }
  4786   else {
  4787     USAGE_ERROR_ACTION(ms,ms);
  4788   }
  4789   return result;
  4790 }
  4791 
  4792 void mspace_malloc_stats(mspace msp) {
  4793   mstate ms = (mstate)msp;
  4794   if (ok_magic(ms)) {
  4795     internal_malloc_stats(ms);
  4796   }
  4797   else {
  4798     USAGE_ERROR_ACTION(ms,ms);
  4799   }
  4800 }
  4801 
  4802 size_t mspace_footprint(mspace msp) {
  4803   size_t result = 0;
  4804   mstate ms = (mstate)msp;
  4805   if (ok_magic(ms)) {
  4806     result = ms->footprint;
  4807   }
  4808   else USAGE_ERROR_ACTION(ms,ms);
  4809   return result;
  4810 }
  4811 
  4812 
  4813 size_t mspace_max_footprint(mspace msp) {
  4814   size_t result = 0;
  4815   mstate ms = (mstate)msp;
  4816   if (ok_magic(ms)) {
  4817     result = ms->max_footprint;
  4818   }
  4819   else USAGE_ERROR_ACTION(ms,ms);
  4820   return result;
  4821 }
  4822 
  4823 
  4824 #if !NO_MALLINFO
  4825 struct mallinfo mspace_mallinfo(mspace msp) {
  4826   mstate ms = (mstate)msp;
  4827   if (!ok_magic(ms)) {
  4828     USAGE_ERROR_ACTION(ms,ms);
  4829   }
  4830   return internal_mallinfo(ms);
  4831 }
  4832 #endif /* NO_MALLINFO */
  4833 
  4834 int mspace_mallopt(int param_number, int value) {
  4835   return change_mparam(param_number, value);
  4836 }
  4837 
  4838 #endif /* MSPACES */
  4839 
  4840 /* -------------------- Alternative MORECORE functions ------------------- */
  4841 
  4842 /*
  4843   Guidelines for creating a custom version of MORECORE:
  4844 
  4845   * For best performance, MORECORE should allocate in multiples of pagesize.
  4846   * MORECORE may allocate more memory than requested. (Or even less,
  4847       but this will usually result in a malloc failure.)
  4848   * MORECORE must not allocate memory when given argument zero, but
  4849       instead return one past the end address of memory from previous
  4850       nonzero call.
  4851   * For best performance, consecutive calls to MORECORE with positive
  4852       arguments should return increasing addresses, indicating that
  4853       space has been contiguously extended.
  4854   * Even though consecutive calls to MORECORE need not return contiguous
  4855       addresses, it must be OK for malloc'ed chunks to span multiple
  4856       regions in those cases where they do happen to be contiguous.
  4857   * MORECORE need not handle negative arguments -- it may instead
  4858       just return MFAIL when given negative arguments.
  4859       Negative arguments are always multiples of pagesize. MORECORE
  4860       must not misinterpret negative args as large positive unsigned
  4861       args. You can suppress all such calls from even occurring by defining
  4862       MORECORE_CANNOT_TRIM.
  4863 
  4864   As an example alternative MORECORE, here is a custom allocator
  4865   kindly contributed for pre-OSX macOS.  It uses virtually but not
  4866   necessarily physically contiguous non-paged memory (locked in,
  4867   present and won't get swapped out).  You can use it by uncommenting
  4868   this section, adding some #includes, and setting up the appropriate
  4869   defines above:
  4870 
  4871       #define MORECORE osMoreCore
  4872 
  4873   There is also a shutdown routine that should somehow be called for
  4874   cleanup upon program exit.
  4875 
  4876   #define MAX_POOL_ENTRIES 100
  4877   #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
  4878   static int next_os_pool;
  4879   void *our_os_pools[MAX_POOL_ENTRIES];
  4880 
  4881   void *osMoreCore(int size)
  4882   {
  4883     void *ptr = 0;
  4884     static void *sbrk_top = 0;
  4885 
  4886     if (size > 0)
  4887     {
  4888       if (size < MINIMUM_MORECORE_SIZE)
  4889          size = MINIMUM_MORECORE_SIZE;
  4890       if (CurrentExecutionLevel() == kTaskLevel)
  4891          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
  4892       if (ptr == 0)
  4893       {
  4894         return (void *) MFAIL;
  4895       }
  4896       // save ptrs so they can be freed during cleanup
  4897       our_os_pools[next_os_pool] = ptr;
  4898       next_os_pool++;
  4899       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
  4900       sbrk_top = (char *) ptr + size;
  4901       return ptr;
  4902     }
  4903     else if (size < 0)
  4904     {
  4905       // we don't currently support shrink behavior
  4906       return (void *) MFAIL;
  4907     }
  4908     else
  4909     {
  4910       return sbrk_top;
  4911     }
  4912   }
  4913 
  4914   // cleanup any allocated memory pools
  4915   // called as last thing before shutting down driver
  4916 
  4917   void osCleanupMem(void)
  4918   {
  4919     void **ptr;
  4920 
  4921     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
  4922       if (*ptr)
  4923       {
  4924          PoolDeallocate(*ptr);
  4925          *ptr = 0;
  4926       }
  4927   }
  4928 
  4929 */
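
       /*
         Another possible MORECORE, sketched as an assumption rather than a
         tested implementation: a fixed static arena for targets that have
         neither sbrk nor mmap.  The arena size and names are illustrative.

           #define MORECORE static_morecore
           #define MORECORE_CANNOT_TRIM

           #define STATIC_ARENA_SIZE (1024 * 1024)
           static char   static_arena[STATIC_ARENA_SIZE];
           static size_t static_arena_used = 0;

           void* static_morecore(int size)
           {
             char* brk = static_arena + static_arena_used;
             if (size == 0)
               return brk;                     // report the current break
             if (size < 0 ||
                 (size_t)size > STATIC_ARENA_SIZE - static_arena_used)
               return (void*) MFAIL;           // cannot shrink or grow further
             static_arena_used += (size_t) size;
             return brk;                       // base of the newly extended space
           }
       */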
  4930 
  4931 
  4932 /* -----------------------------------------------------------------------
  4933 History:
  4934     V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
  4935       * Add max_footprint functions
  4936       * Ensure all appropriate literals are size_t
  4937       * Fix conditional compilation problem for some #define settings
  4938       * Avoid concatenating segments with the one provided
  4939         in create_mspace_with_base
  4940       * Rename some variables to avoid compiler shadowing warnings
  4941       * Use explicit lock initialization.
  4942       * Better handling of sbrk interference.
  4943       * Simplify and fix segment insertion, trimming and mspace_destroy
  4944       * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
  4945       * Thanks especially to Dennis Flanagan for help on these.
  4946 
  4947     V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
  4948       * Fix memalign brace error.
  4949 
  4950     V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
  4951       * Fix improper #endif nesting in C++
  4952       * Add explicit casts needed for C++
  4953 
  4954     V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
  4955       * Use trees for large bins
  4956       * Support mspaces
  4957       * Use segments to unify sbrk-based and mmap-based system allocation,
  4958         removing need for emulation on most platforms without sbrk.
  4959       * Default safety checks
  4960       * Optional footer checks. Thanks to William Robertson for the idea.
  4961       * Internal code refactoring
  4962       * Incorporate suggestions and platform-specific changes.
  4963         Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
  4964         Aaron Bachmann,  Emery Berger, and others.
  4965       * Speed up non-fastbin processing enough to remove fastbins.
  4966       * Remove useless cfree() to avoid conflicts with other apps.
  4967       * Remove internal memcpy, memset. Compilers handle builtins better.
  4968       * Remove some options that no one ever used and rename others.
  4969 
  4970     V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
  4971       * Fix malloc_state bitmap array misdeclaration
  4972 
  4973     V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
  4974       * Allow tuning of FIRST_SORTED_BIN_SIZE
  4975       * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
  4976       * Better detection and support for non-contiguousness of MORECORE.
  4977         Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
  4978       * Bypass most of malloc if no frees. Thanks To Emery Berger.
  4979       * Fix freeing of old top non-contiguous chunk in sysmalloc.
  4980       * Raised default trim and map thresholds to 256K.
  4981       * Fix mmap-related #defines. Thanks to Lubos Lunak.
  4982       * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
  4983       * Branch-free bin calculation
  4984       * Default trim and mmap thresholds now 256K.
  4985 
  4986     V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
  4987       * Introduce independent_comalloc and independent_calloc.
  4988         Thanks to Michael Pachos for motivation and help.
  4989       * Make optional .h file available
  4990       * Allow > 2GB requests on 32bit systems.
  4991       * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
  4992         Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
  4993         and Anonymous.
  4994       * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
  4995         helping test this.)
  4996       * memalign: check alignment arg
  4997       * realloc: don't try to shift chunks backwards, since this
  4998         leads to  more fragmentation in some programs and doesn't
  4999         seem to help in any others.
  5000       * Collect all cases in malloc requiring system memory into sysmalloc
  5001       * Use mmap as backup to sbrk
  5002       * Place all internal state in malloc_state
  5003       * Introduce fastbins (although similar to 2.5.1)
  5004       * Many minor tunings and cosmetic improvements
  5005       * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
  5006       * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
  5007         Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
  5008       * Include errno.h to support default failure action.
  5009 
  5010     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
  5011       * return null for negative arguments
  5012       * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
  5013          * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
  5014           (e.g. WIN32 platforms)
  5015          * Cleanup header file inclusion for WIN32 platforms
  5016          * Cleanup code to avoid Microsoft Visual C++ compiler complaints
  5017          * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
  5018            memory allocation routines
  5019          * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
  5020          * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
  5021            usage of 'assert' in non-WIN32 code
  5022          * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
  5023            avoid infinite loop
  5024       * Always call 'fREe()' rather than 'free()'
  5025 
  5026     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
  5027       * Fixed ordering problem with boundary-stamping
  5028 
  5029     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
  5030       * Added pvalloc, as recommended by H.J. Liu
  5031       * Added 64bit pointer support mainly from Wolfram Gloger
  5032       * Added anonymously donated WIN32 sbrk emulation
  5033       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
  5034       * malloc_extend_top: fix mask error that caused wastage after
  5035         foreign sbrks
  5036       * Add linux mremap support code from HJ Liu
  5037 
  5038     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
  5039       * Integrated most documentation with the code.
  5040       * Add support for mmap, with help from
  5041         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5042       * Use last_remainder in more cases.
  5043       * Pack bins using idea from  colin@nyx10.cs.du.edu
  5044       * Use ordered bins instead of best-fit threshold
  5045       * Eliminate block-local decls to simplify tracing and debugging.
  5046       * Support another case of realloc via move into top
  5047       * Fix error occurring when initial sbrk_base not word-aligned.
  5048       * Rely on page size for units instead of SBRK_UNIT to
  5049         avoid surprises about sbrk alignment conventions.
  5050       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
  5051         (raymond@es.ele.tue.nl) for the suggestion.
  5052       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
  5053       * More precautions for cases where other routines call sbrk,
  5054         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  5055       * Added macros etc., allowing use in linux libc from
  5056         H.J. Lu (hjl@gnu.ai.mit.edu)
  5057       * Inverted this history list
  5058 
  5059     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
  5060       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
  5061       * Removed all preallocation code since under current scheme
  5062         the work required to undo bad preallocations exceeds
  5063         the work saved in good cases for most test programs.
  5064       * No longer use return list or unconsolidated bins since
  5065         no scheme using them consistently outperforms those that don't
  5066         given above changes.
  5067       * Use best fit for very large chunks to prevent some worst-cases.
  5068       * Added some support for debugging
  5069 
  5070     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
  5071       * Removed footers when chunks are in use. Thanks to
  5072         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
  5073 
  5074     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
  5075       * Added malloc_trim, with help from Wolfram Gloger
  5076         (wmglo@Dent.MED.Uni-Muenchen.DE).
  5077 
  5078     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
  5079 
  5080     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
  5081       * realloc: try to expand in both directions
  5082       * malloc: swap order of clean-bin strategy;
  5083       * realloc: only conditionally expand backwards
  5084       * Try not to scavenge used bins
  5085       * Use bin counts as a guide to preallocation
  5086       * Occasionally bin return list chunks in first scan
  5087       * Add a few optimizations from colin@nyx10.cs.du.edu
  5088 
  5089     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
  5090       * faster bin computation & slightly different binning
  5091       * merged all consolidations to one part of malloc proper
  5092          (eliminating old malloc_find_space & malloc_clean_bin)
  5093       * Scan 2 returns chunks (not just 1)
  5094       * Propagate failure in realloc if malloc returns 0
  5095       * Add stuff to allow compilation on non-ANSI compilers
  5096           from kpv@research.att.com
  5097 
  5098     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
  5099       * removed potential for odd address access in prev_chunk
  5100       * removed dependency on getpagesize.h
  5101       * misc cosmetics and a bit more internal documentation
  5102       * anticosmetics: mangled names in macros to evade debugger strangeness
  5103       * tested on sparc, hp-700, dec-mips, rs6000
  5104           with gcc & native cc (hp, dec only) allowing
  5105           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
  5106 
  5107     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
  5108       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
  5109          structure of old version,  but most details differ.)
  5110  
  5111 */
  5112 
  5113 #endif /* !HAVE_MALLOC */