src/stdlib/SDL_malloc.c
changeset 1330 450721ad5436
child 1333 39b0d60d3f50
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/stdlib/SDL_malloc.c	Mon Feb 06 08:28:51 2006 +0000
     1.3 @@ -0,0 +1,5108 @@
     1.4 +/*
     1.5 +    SDL - Simple DirectMedia Layer
     1.6 +    Copyright (C) 1997-2006 Sam Lantinga
     1.7 +
     1.8 +    This library is free software; you can redistribute it and/or
     1.9 +    modify it under the terms of the GNU Lesser General Public
    1.10 +    License as published by the Free Software Foundation; either
    1.11 +    version 2.1 of the License, or (at your option) any later version.
    1.12 +
    1.13 +    This library is distributed in the hope that it will be useful,
    1.14 +    but WITHOUT ANY WARRANTY; without even the implied warranty of
    1.15 +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    1.16 +    Lesser General Public License for more details.
    1.17 +
    1.18 +    You should have received a copy of the GNU Lesser General Public
    1.19 +    License along with this library; if not, write to the Free Software
    1.20 +    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    1.21 +
    1.22 +    Sam Lantinga
    1.23 +    slouken@libsdl.org
    1.24 +*/
    1.25 +
    1.26 +
    1.27 +/* This file contains portable memory management functions for SDL */
    1.28 +
    1.29 +#include "SDL_stdlib.h"
    1.30 +#include "SDL_string.h"
    1.31 +
    1.32 +#ifndef HAVE_MALLOC
    1.33 +
    1.34 +#define LACKS_STDIO_H
    1.35 +#define LACKS_UNISTD_H
    1.36 +#define LACKS_FCNTL_H
    1.37 +#define LACKS_SYS_PARAM_H
    1.38 +#define LACKS_SYS_MMAN_H
    1.39 +#define LACKS_STRINGS_H
    1.40 +#define LACKS_STRING_H
    1.41 +#define LACKS_SYS_TYPES_H
    1.42 +#define LACKS_ERRNO_H
    1.43 +#define LACKS_STDLIB_H
    1.44 +#define ABORT
    1.45 +
    1.46 +/*
    1.47 +  This is a version (aka dlmalloc) of malloc/free/realloc written by
    1.48 +  Doug Lea and released to the public domain, as explained at
    1.49 +  http://creativecommons.org/licenses/publicdomain.  Send questions,
    1.50 +  comments, complaints, performance data, etc to dl@cs.oswego.edu
    1.51 +
    1.52 +* Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
    1.53 +
    1.54 +   Note: There may be an updated version of this malloc obtainable at
    1.55 +           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
    1.56 +         Check before installing!
    1.57 +
    1.58 +* Quickstart
    1.59 +
    1.60 +  This library is all in one file to simplify the most common usage:
    1.61 +  ftp it, compile it (-O3), and link it into another program. All of
    1.62 +  the compile-time options default to reasonable values for use on
    1.63 +  most platforms.  You might later want to step through various
    1.64 +  compile-time and dynamic tuning options.
    1.65 +
    1.66 +  For convenience, an include file for code using this malloc is at:
    1.67 +     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
    1.68 +  You don't really need this .h file unless you call functions not
    1.69 +  defined in your system include files.  The .h file contains only the
    1.70 +  excerpts from this file needed for using this malloc on ANSI C/C++
    1.71 +  systems, so long as you haven't changed compile-time options about
    1.72 +  naming and tuning parameters.  If you do, then you can create your
    1.73 +  own malloc.h that does include all settings by cutting at the point
    1.74 +  indicated below. Note that you may already by default be using a C
    1.75 +  library containing a malloc that is based on some version of this
    1.76 +  malloc (for example in linux). You might still want to use the one
    1.77 +  in this file to customize settings or to avoid overheads associated
    1.78 +  with library versions.
    1.79 +
    1.80 +* Vital statistics:
    1.81 +
    1.82 +  Supported pointer/size_t representation:       4 or 8 bytes
    1.83 +       size_t MUST be an unsigned type of the same width as
    1.84 +       pointers. (If you are using an ancient system that declares
    1.85 +       size_t as a signed type, or need it to be a different width
    1.86 +       than pointers, you can use a previous release of this malloc
    1.87 +       (e.g. 2.7.2) supporting these.)
    1.88 +
    1.89 +  Alignment:                                     8 bytes (default)
    1.90 +       This suffices for nearly all current machines and C compilers.
    1.91 +       However, you can define MALLOC_ALIGNMENT to be wider than this
     1.92 +       if necessary (up to 128 bytes), at the expense of using more space.
    1.93 +
    1.94 +  Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
    1.95 +                                          8 or 16 bytes (if 8byte sizes)
    1.96 +       Each malloced chunk has a hidden word of overhead holding size
    1.97 +       and status information, and additional cross-check word
    1.98 +       if FOOTERS is defined.
    1.99 +
   1.100 +  Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
   1.101 +                          8-byte ptrs:  32 bytes    (including overhead)
   1.102 +
   1.103 +       Even a request for zero bytes (i.e., malloc(0)) returns a
   1.104 +       pointer to something of the minimum allocatable size.
    1.105 +       The maximum overhead wastage (i.e., the number of extra bytes
    1.106 +       allocated beyond what was requested in malloc) is less than or equal
   1.107 +       to the minimum size, except for requests >= mmap_threshold that
   1.108 +       are serviced via mmap(), where the worst case wastage is about
   1.109 +       32 bytes plus the remainder from a system page (the minimal
   1.110 +       mmap unit); typically 4096 or 8192 bytes.
   1.111 +
   1.112 +  Security: static-safe; optionally more or less
   1.113 +       The "security" of malloc refers to the ability of malicious
   1.114 +       code to accentuate the effects of errors (for example, freeing
   1.115 +       space that is not currently malloc'ed or overwriting past the
   1.116 +       ends of chunks) in code that calls malloc.  This malloc
   1.117 +       guarantees not to modify any memory locations below the base of
   1.118 +       heap, i.e., static variables, even in the presence of usage
   1.119 +       errors.  The routines additionally detect most improper frees
   1.120 +       and reallocs.  All this holds as long as the static bookkeeping
   1.121 +       for malloc itself is not corrupted by some other means.  This
   1.122 +       is only one aspect of security -- these checks do not, and
   1.123 +       cannot, detect all possible programming errors.
   1.124 +
   1.125 +       If FOOTERS is defined nonzero, then each allocated chunk
   1.126 +       carries an additional check word to verify that it was malloced
   1.127 +       from its space.  These check words are the same within each
   1.128 +       execution of a program using malloc, but differ across
   1.129 +       executions, so externally crafted fake chunks cannot be
   1.130 +       freed. This improves security by rejecting frees/reallocs that
   1.131 +       could corrupt heap memory, in addition to the checks preventing
   1.132 +       writes to statics that are always on.  This may further improve
   1.133 +       security at the expense of time and space overhead.  (Note that
   1.134 +       FOOTERS may also be worth using with MSPACES.)
   1.135 +
   1.136 +       By default detected errors cause the program to abort (calling
   1.137 +       "abort()"). You can override this to instead proceed past
   1.138 +       errors by defining PROCEED_ON_ERROR.  In this case, a bad free
   1.139 +       has no effect, and a malloc that encounters a bad address
   1.140 +       caused by user overwrites will ignore the bad address by
   1.141 +       dropping pointers and indices to all known memory. This may
   1.142 +       be appropriate for programs that should continue if at all
   1.143 +       possible in the face of programming errors, although they may
   1.144 +       run out of memory because dropped memory is never reclaimed.
   1.145 +
   1.146 +       If you don't like either of these options, you can define
   1.147 +       CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
    1.148 +  else. And if you are sure that your program using malloc has
   1.149 +       no errors or vulnerabilities, you can define INSECURE to 1,
   1.150 +       which might (or might not) provide a small performance improvement.
   1.151 +
   1.152 +  Thread-safety: NOT thread-safe unless USE_LOCKS defined
   1.153 +       When USE_LOCKS is defined, each public call to malloc, free,
   1.154 +       etc is surrounded with either a pthread mutex or a win32
   1.155 +       spinlock (depending on WIN32). This is not especially fast, and
   1.156 +       can be a major bottleneck.  It is designed only to provide
   1.157 +       minimal protection in concurrent environments, and to provide a
   1.158 +       basis for extensions.  If you are using malloc in a concurrent
   1.159 +       program, consider instead using ptmalloc, which is derived from
   1.160 +       a version of this malloc. (See http://www.malloc.de).
   1.161 +
   1.162 +  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
   1.163 +       This malloc can use unix sbrk or any emulation (invoked using
   1.164 +       the CALL_MORECORE macro) and/or mmap/munmap or any emulation
   1.165 +       (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
   1.166 +       memory.  On most unix systems, it tends to work best if both
   1.167 +       MORECORE and MMAP are enabled.  On Win32, it uses emulations
   1.168 +       based on VirtualAlloc. It also uses common C library functions
   1.169 +       like memset.
   1.170 +
   1.171 +  Compliance: I believe it is compliant with the Single Unix Specification
   1.172 +       (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
   1.173 +       others as well.
   1.174 +
   1.175 +* Overview of algorithms
   1.176 +
   1.177 +  This is not the fastest, most space-conserving, most portable, or
   1.178 +  most tunable malloc ever written. However it is among the fastest
   1.179 +  while also being among the most space-conserving, portable and
   1.180 +  tunable.  Consistent balance across these factors results in a good
   1.181 +  general-purpose allocator for malloc-intensive programs.
   1.182 +
   1.183 +  In most ways, this malloc is a best-fit allocator. Generally, it
   1.184 +  chooses the best-fitting existing chunk for a request, with ties
   1.185 +  broken in approximately least-recently-used order. (This strategy
   1.186 +  normally maintains low fragmentation.) However, for requests less
    1.187 +  than 256 bytes, it deviates from best-fit when there is not an
   1.188 +  exactly fitting available chunk by preferring to use space adjacent
   1.189 +  to that used for the previous small request, as well as by breaking
   1.190 +  ties in approximately most-recently-used order. (These enhance
   1.191 +  locality of series of small allocations.)  And for very large requests
   1.192 +  (>= 256Kb by default), it relies on system memory mapping
   1.193 +  facilities, if supported.  (This helps avoid carrying around and
   1.194 +  possibly fragmenting memory used only for large chunks.)
   1.195 +
   1.196 +  All operations (except malloc_stats and mallinfo) have execution
   1.197 +  times that are bounded by a constant factor of the number of bits in
   1.198 +  a size_t, not counting any clearing in calloc or copying in realloc,
   1.199 +  or actions surrounding MORECORE and MMAP that have times
   1.200 +  proportional to the number of non-contiguous regions returned by
   1.201 +  system allocation routines, which is often just 1.
   1.202 +
   1.203 +  The implementation is not very modular and seriously overuses
   1.204 +  macros. Perhaps someday all C compilers will do as good a job
   1.205 +  inlining modular code as can now be done by brute-force expansion,
   1.206 +  but now, enough of them seem not to.
   1.207 +
   1.208 +  Some compilers issue a lot of warnings about code that is
   1.209 +  dead/unreachable only on some platforms, and also about intentional
   1.210 +  uses of negation on unsigned types. All known cases of each can be
   1.211 +  ignored.
   1.212 +
   1.213 +  For a longer but out of date high-level description, see
   1.214 +     http://gee.cs.oswego.edu/dl/html/malloc.html
   1.215 +
   1.216 +* MSPACES
   1.217 +  If MSPACES is defined, then in addition to malloc, free, etc.,
   1.218 +  this file also defines mspace_malloc, mspace_free, etc. These
   1.219 +  are versions of malloc routines that take an "mspace" argument
   1.220 +  obtained using create_mspace, to control all internal bookkeeping.
   1.221 +  If ONLY_MSPACES is defined, only these versions are compiled.
   1.222 +  So if you would like to use this allocator for only some allocations,
   1.223 +  and your system malloc for others, you can compile with
   1.224 +  ONLY_MSPACES and then do something like...
   1.225 +    static mspace mymspace = create_mspace(0,0); // for example
   1.226 +    #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
   1.227 +
   1.228 +  (Note: If you only need one instance of an mspace, you can instead
   1.229 +  use "USE_DL_PREFIX" to relabel the global malloc.)
   1.230 +
   1.231 +  You can similarly create thread-local allocators by storing
   1.232 +  mspaces as thread-locals. For example:
   1.233 +    static __thread mspace tlms = 0;
   1.234 +    void*  tlmalloc(size_t bytes) {
   1.235 +      if (tlms == 0) tlms = create_mspace(0, 0);
   1.236 +      return mspace_malloc(tlms, bytes);
   1.237 +    }
   1.238 +    void  tlfree(void* mem) { mspace_free(tlms, mem); }
   1.239 +
   1.240 +  Unless FOOTERS is defined, each mspace is completely independent.
   1.241 +  You cannot allocate from one and free to another (although
   1.242 +  conformance is only weakly checked, so usage errors are not always
   1.243 +  caught). If FOOTERS is defined, then each chunk carries around a tag
   1.244 +  indicating its originating mspace, and frees are directed to their
   1.245 +  originating spaces.
   1.246 +
   1.247 + -------------------------  Compile-time options ---------------------------
   1.248 +
   1.249 +Be careful in setting #define values for numerical constants of type
   1.250 +size_t. On some systems, literal values are not automatically extended
    1.251 +to size_t precision unless they are explicitly cast.
   1.252 +
   1.253 +WIN32                    default: defined if _WIN32 defined
   1.254 +  Defining WIN32 sets up defaults for MS environment and compilers.
   1.255 +  Otherwise defaults are for unix.
   1.256 +
   1.257 +MALLOC_ALIGNMENT         default: (size_t)8
   1.258 +  Controls the minimum alignment for malloc'ed chunks.  It must be a
   1.259 +  power of two and at least 8, even on machines for which smaller
   1.260 +  alignments would suffice. It may be defined as larger than this
   1.261 +  though. Note however that code and data structures are optimized for
   1.262 +  the case of 8-byte alignment.
   1.263 +
   1.264 +MSPACES                  default: 0 (false)
   1.265 +  If true, compile in support for independent allocation spaces.
   1.266 +  This is only supported if HAVE_MMAP is true.
   1.267 +
   1.268 +ONLY_MSPACES             default: 0 (false)
   1.269 +  If true, only compile in mspace versions, not regular versions.
   1.270 +
   1.271 +USE_LOCKS                default: 0 (false)
   1.272 +  Causes each call to each public routine to be surrounded with
   1.273 +  pthread or WIN32 mutex lock/unlock. (If set true, this can be
   1.274 +  overridden on a per-mspace basis for mspace versions.)
   1.275 +
   1.276 +FOOTERS                  default: 0
   1.277 +  If true, provide extra checking and dispatching by placing
   1.278 +  information in the footers of allocated chunks. This adds
   1.279 +  space and time overhead.
   1.280 +
   1.281 +INSECURE                 default: 0
   1.282 +  If true, omit checks for usage errors and heap space overwrites.
   1.283 +
   1.284 +USE_DL_PREFIX            default: NOT defined
   1.285 +  Causes compiler to prefix all public routines with the string 'dl'.
   1.286 +  This can be useful when you only want to use this malloc in one part
   1.287 +  of a program, using your regular system malloc elsewhere.
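          +
          +  For illustration, a minimal sketch of mixing the prefixed entry
          +  points with your system malloc:
          +    void* p = dlmalloc(100);   // served by this allocator
          +    void* q = malloc(100);     // served by the system malloc
          +    dlfree(p);
          +    free(q);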
   1.288 +
   1.289 +ABORT                    default: defined as abort()
   1.290 +  Defines how to abort on failed checks.  On most systems, a failed
   1.291 +  check cannot die with an "assert" or even print an informative
   1.292 +  message, because the underlying print routines in turn call malloc,
   1.293 +  which will fail again.  Generally, the best policy is to simply call
   1.294 +  abort(). It's not very useful to do more than this because many
   1.295 +  errors due to overwriting will show up as address faults (null, odd
   1.296 +  addresses etc) rather than malloc-triggered checks, so will also
   1.297 +  abort.  Also, most compilers know that abort() does not return, so
   1.298 +  can better optimize code conditionally calling it.
   1.299 +
   1.300 +PROCEED_ON_ERROR           default: defined as 0 (false)
    1.301 +  Controls whether detected bad addresses cause them to be bypassed
   1.302 +  rather than aborting. If set, detected bad arguments to free and
   1.303 +  realloc are ignored. And all bookkeeping information is zeroed out
   1.304 +  upon a detected overwrite of freed heap space, thus losing the
   1.305 +  ability to ever return it from malloc again, but enabling the
   1.306 +  application to proceed. If PROCEED_ON_ERROR is defined, the
   1.307 +  static variable malloc_corruption_error_count is compiled in
   1.308 +  and can be examined to see if errors have occurred. This option
   1.309 +  generates slower code than the default abort policy.
   1.310 +
   1.311 +DEBUG                    default: NOT defined
   1.312 +  The DEBUG setting is mainly intended for people trying to modify
   1.313 +  this code or diagnose problems when porting to new platforms.
   1.314 +  However, it may also be able to better isolate user errors than just
   1.315 +  using runtime checks.  The assertions in the check routines spell
   1.316 +  out in more detail the assumptions and invariants underlying the
   1.317 +  algorithms.  The checking is fairly extensive, and will slow down
   1.318 +  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
   1.319 +  set will attempt to check every non-mmapped allocated and free chunk
   1.320 +  in the course of computing the summaries.
   1.321 +
   1.322 +ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
   1.323 +  Debugging assertion failures can be nearly impossible if your
   1.324 +  version of the assert macro causes malloc to be called, which will
   1.325 +  lead to a cascade of further failures, blowing the runtime stack.
    1.326 +  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
   1.327 +  which will usually make debugging easier.
   1.328 +
   1.329 +MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
    1.330 +  The action to take before "return 0" when malloc fails because
    1.331 +  there is no memory available.
   1.332 +
   1.333 +HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
   1.334 +  True if this system supports sbrk or an emulation of it.
   1.335 +
   1.336 +MORECORE                  default: sbrk
   1.337 +  The name of the sbrk-style system routine to call to obtain more
   1.338 +  memory.  See below for guidance on writing custom MORECORE
   1.339 +  functions. The type of the argument to sbrk/MORECORE varies across
   1.340 +  systems.  It cannot be size_t, because it supports negative
   1.341 +  arguments, so it is normally the signed type of the same width as
   1.342 +  size_t (sometimes declared as "intptr_t").  It doesn't much matter
   1.343 +  though. Internally, we only call it with arguments less than half
   1.344 +  the max value of a size_t, which should work across all reasonable
   1.345 +  possibilities, although sometimes generating compiler warnings.  See
   1.346 +  near the end of this file for guidelines for creating a custom
   1.347 +  version of MORECORE.
   1.348 +
   1.349 +MORECORE_CONTIGUOUS       default: 1 (true)
   1.350 +  If true, take advantage of fact that consecutive calls to MORECORE
   1.351 +  with positive arguments always return contiguous increasing
   1.352 +  addresses.  This is true of unix sbrk. It does not hurt too much to
   1.353 +  set it true anyway, since malloc copes with non-contiguities.
    1.354 +  Setting it false when MORECORE is definitely non-contiguous, though,
    1.355 +  saves the time and possibly wasted space it would otherwise take to
    1.356 +  discover this.
   1.356 +
   1.357 +MORECORE_CANNOT_TRIM      default: NOT defined
   1.358 +  True if MORECORE cannot release space back to the system when given
   1.359 +  negative arguments. This is generally necessary only if you are
   1.360 +  using a hand-crafted MORECORE function that cannot handle negative
   1.361 +  arguments.
   1.362 +
   1.363 +HAVE_MMAP                 default: 1 (true)
   1.364 +  True if this system supports mmap or an emulation of it.  If so, and
   1.365 +  HAVE_MORECORE is not true, MMAP is used for all system
   1.366 +  allocation. If set and HAVE_MORECORE is true as well, MMAP is
   1.367 +  primarily used to directly allocate very large blocks. It is also
   1.368 +  used as a backup strategy in cases where MORECORE fails to provide
   1.369 +  space from system. Note: A single call to MUNMAP is assumed to be
    1.370 +  able to unmap memory that may have been allocated using multiple calls
   1.371 +  to MMAP, so long as they are adjacent.
   1.372 +
   1.373 +HAVE_MREMAP               default: 1 on linux, else 0
   1.374 +  If true realloc() uses mremap() to re-allocate large blocks and
   1.375 +  extend or shrink allocation spaces.
   1.376 +
   1.377 +MMAP_CLEARS               default: 1 on unix
   1.378 +  True if mmap clears memory so calloc doesn't need to. This is true
   1.379 +  for standard unix mmap using /dev/zero.
   1.380 +
   1.381 +USE_BUILTIN_FFS            default: 0 (i.e., not used)
   1.382 +  Causes malloc to use the builtin ffs() function to compute indices.
   1.383 +  Some compilers may recognize and intrinsify ffs to be faster than the
   1.384 +  supplied C version. Also, the case of x86 using gcc is special-cased
   1.385 +  to an asm instruction, so is already as fast as it can be, and so
   1.386 +  this setting has no effect. (On most x86s, the asm version is only
   1.387 +  slightly faster than the C version.)
   1.388 +
   1.389 +malloc_getpagesize         default: derive from system includes, or 4096.
   1.390 +  The system page size. To the extent possible, this malloc manages
   1.391 +  memory from the system in page-size units.  This may be (and
   1.392 +  usually is) a function rather than a constant. This is ignored
    1.393 +  if WIN32, where page size is determined using GetSystemInfo during
   1.394 +  initialization.
   1.395 +
   1.396 +USE_DEV_RANDOM             default: 0 (i.e., not used)
   1.397 +  Causes malloc to use /dev/random to initialize secure magic seed for
   1.398 +  stamping footers. Otherwise, the current time is used.
   1.399 +
   1.400 +NO_MALLINFO                default: 0
   1.401 +  If defined, don't compile "mallinfo". This can be a simple way
   1.402 +  of dealing with mismatches between system declarations and
   1.403 +  those in this file.
   1.404 +
   1.405 +MALLINFO_FIELD_TYPE        default: size_t
   1.406 +  The type of the fields in the mallinfo struct. This was originally
   1.407 +  defined as "int" in SVID etc, but is more usefully defined as
    1.408 +  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
   1.409 +
   1.410 +REALLOC_ZERO_BYTES_FREES    default: not defined
    1.411 +  This should be set if a call to realloc with zero bytes should
    1.412 +  be the same as a call to free. Some people think it should. Otherwise,
    1.413 +  because this malloc returns a unique pointer for malloc(0), realloc(p, 0)
    1.414 +  does too.
   1.415 +
   1.416 +LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
   1.417 +LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
   1.418 +LACKS_STDLIB_H                default: NOT defined unless on WIN32
   1.419 +  Define these if your system does not have these header files.
   1.420 +  You might need to manually insert some of the declarations they provide.
   1.421 +
   1.422 +DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
   1.423 +                                system_info.dwAllocationGranularity in WIN32,
   1.424 +                                otherwise 64K.
   1.425 +      Also settable using mallopt(M_GRANULARITY, x)
   1.426 +  The unit for allocating and deallocating memory from the system.  On
   1.427 +  most systems with contiguous MORECORE, there is no reason to
   1.428 +  make this more than a page. However, systems with MMAP tend to
   1.429 +  either require or encourage larger granularities.  You can increase
    1.430 +  this value to prevent system allocation functions from being called so
   1.431 +  often, especially if they are slow.  The value must be at least one
   1.432 +  page and must be a power of two.  Setting to 0 causes initialization
   1.433 +  to either page size or win32 region size.  (Note: In previous
   1.434 +  versions of malloc, the equivalent of this option was called
   1.435 +  "TOP_PAD")
   1.436 +
   1.437 +DEFAULT_TRIM_THRESHOLD    default: 2MB
   1.438 +      Also settable using mallopt(M_TRIM_THRESHOLD, x)
   1.439 +  The maximum amount of unused top-most memory to keep before
   1.440 +  releasing via malloc_trim in free().  Automatic trimming is mainly
   1.441 +  useful in long-lived programs using contiguous MORECORE.  Because
   1.442 +  trimming via sbrk can be slow on some systems, and can sometimes be
   1.443 +  wasteful (in cases where programs immediately afterward allocate
   1.444 +  more large chunks) the value should be high enough so that your
   1.445 +  overall system performance would improve by releasing this much
   1.446 +  memory.  As a rough guide, you might set to a value close to the
   1.447 +  average size of a process (program) running on your system.
   1.448 +  Releasing this much memory would allow such a process to run in
   1.449 +  memory.  Generally, it is worth tuning trim thresholds when a
   1.450 +  program undergoes phases where several large chunks are allocated
   1.451 +  and released in ways that can reuse each other's storage, perhaps
   1.452 +  mixed with phases where there are no such chunks at all. The trim
   1.453 +  value must be greater than page size to have any useful effect.  To
   1.454 +  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
   1.455 +  some people use of mallocing a huge space and then freeing it at
   1.456 +  program startup, in an attempt to reserve system memory, doesn't
   1.457 +  have the intended effect under automatic trimming, since that memory
   1.458 +  will immediately be returned to the system.
   1.459 +
   1.460 +DEFAULT_MMAP_THRESHOLD       default: 256K
   1.461 +      Also settable using mallopt(M_MMAP_THRESHOLD, x)
   1.462 +  The request size threshold for using MMAP to directly service a
   1.463 +  request. Requests of at least this size that cannot be allocated
   1.464 +  using already-existing space will be serviced via mmap.  (If enough
   1.465 +  normal freed space already exists it is used instead.)  Using mmap
   1.466 +  segregates relatively large chunks of memory so that they can be
   1.467 +  individually obtained and released from the host system. A request
   1.468 +  serviced through mmap is never reused by any other request (at least
   1.469 +  not directly; the system may just so happen to remap successive
   1.470 +  requests to the same locations).  Segregating space in this way has
   1.471 +  the benefits that: Mmapped space can always be individually released
   1.472 +  back to the system, which helps keep the system level memory demands
   1.473 +  of a long-lived program low.  Also, mapped memory doesn't become
   1.474 +  `locked' between other chunks, as can happen with normally allocated
   1.475 +  chunks, which means that even trimming via malloc_trim would not
   1.476 +  release them.  However, it has the disadvantage that the space
   1.477 +  cannot be reclaimed, consolidated, and then used to service later
   1.478 +  requests, as happens with normal chunks.  The advantages of mmap
   1.479 +  nearly always outweigh disadvantages for "large" chunks, but the
   1.480 +  value of "large" may vary across systems.  The default is an
   1.481 +  empirically derived value that works well in most systems. You can
   1.482 +  disable mmap by setting to MAX_SIZE_T.
   1.483 +
   1.484 +*/
   1.485 +
   1.486 +#ifndef WIN32
   1.487 +#ifdef _WIN32
   1.488 +#define WIN32 1
   1.489 +#endif  /* _WIN32 */
   1.490 +#endif  /* WIN32 */
   1.491 +#ifdef WIN32
   1.492 +#include "SDL_windows.h"
   1.493 +#define HAVE_MMAP 1
   1.494 +#define HAVE_MORECORE 0
   1.495 +#define LACKS_UNISTD_H
   1.496 +#define LACKS_SYS_PARAM_H
   1.497 +#define LACKS_SYS_MMAN_H
   1.498 +#define LACKS_STRING_H
   1.499 +#define LACKS_STRINGS_H
   1.500 +#define LACKS_SYS_TYPES_H
   1.501 +#define LACKS_ERRNO_H
   1.502 +#define MALLOC_FAILURE_ACTION
   1.503 +#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
   1.504 +#endif  /* WIN32 */
   1.505 +
   1.506 +#if defined(DARWIN) || defined(_DARWIN)
   1.507 +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
   1.508 +#ifndef HAVE_MORECORE
   1.509 +#define HAVE_MORECORE 0
   1.510 +#define HAVE_MMAP 1
   1.511 +#endif  /* HAVE_MORECORE */
   1.512 +#endif  /* DARWIN */
   1.513 +
   1.514 +#ifndef LACKS_SYS_TYPES_H
   1.515 +#include <sys/types.h>  /* For size_t */
   1.516 +#endif  /* LACKS_SYS_TYPES_H */
   1.517 +
   1.518 +/* The maximum possible size_t value has all bits set */
   1.519 +#define MAX_SIZE_T           (~(size_t)0)
   1.520 +
   1.521 +#ifndef ONLY_MSPACES
   1.522 +#define ONLY_MSPACES 0
   1.523 +#endif  /* ONLY_MSPACES */
   1.524 +#ifndef MSPACES
   1.525 +#if ONLY_MSPACES
   1.526 +#define MSPACES 1
   1.527 +#else   /* ONLY_MSPACES */
   1.528 +#define MSPACES 0
   1.529 +#endif  /* ONLY_MSPACES */
   1.530 +#endif  /* MSPACES */
   1.531 +#ifndef MALLOC_ALIGNMENT
   1.532 +#define MALLOC_ALIGNMENT ((size_t)8U)
   1.533 +#endif  /* MALLOC_ALIGNMENT */
   1.534 +#ifndef FOOTERS
   1.535 +#define FOOTERS 0
   1.536 +#endif  /* FOOTERS */
   1.537 +#ifndef ABORT
   1.538 +#define ABORT  abort()
   1.539 +#endif  /* ABORT */
   1.540 +#ifndef ABORT_ON_ASSERT_FAILURE
   1.541 +#define ABORT_ON_ASSERT_FAILURE 1
   1.542 +#endif  /* ABORT_ON_ASSERT_FAILURE */
   1.543 +#ifndef PROCEED_ON_ERROR
   1.544 +#define PROCEED_ON_ERROR 0
   1.545 +#endif  /* PROCEED_ON_ERROR */
   1.546 +#ifndef USE_LOCKS
   1.547 +#define USE_LOCKS 0
   1.548 +#endif  /* USE_LOCKS */
   1.549 +#ifndef INSECURE
   1.550 +#define INSECURE 0
   1.551 +#endif  /* INSECURE */
   1.552 +#ifndef HAVE_MMAP
   1.553 +#define HAVE_MMAP 1
   1.554 +#endif  /* HAVE_MMAP */
   1.555 +#ifndef MMAP_CLEARS
   1.556 +#define MMAP_CLEARS 1
   1.557 +#endif  /* MMAP_CLEARS */
   1.558 +#ifndef HAVE_MREMAP
   1.559 +#ifdef linux
   1.560 +#define HAVE_MREMAP 1
   1.561 +#else   /* linux */
   1.562 +#define HAVE_MREMAP 0
   1.563 +#endif  /* linux */
   1.564 +#endif  /* HAVE_MREMAP */
   1.565 +#ifndef MALLOC_FAILURE_ACTION
   1.566 +#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
   1.567 +#endif  /* MALLOC_FAILURE_ACTION */
   1.568 +#ifndef HAVE_MORECORE
   1.569 +#if ONLY_MSPACES
   1.570 +#define HAVE_MORECORE 0
   1.571 +#else   /* ONLY_MSPACES */
   1.572 +#define HAVE_MORECORE 1
   1.573 +#endif  /* ONLY_MSPACES */
   1.574 +#endif  /* HAVE_MORECORE */
   1.575 +#if !HAVE_MORECORE
   1.576 +#define MORECORE_CONTIGUOUS 0
   1.577 +#else   /* !HAVE_MORECORE */
   1.578 +#ifndef MORECORE
   1.579 +#define MORECORE sbrk
   1.580 +#endif  /* MORECORE */
   1.581 +#ifndef MORECORE_CONTIGUOUS
   1.582 +#define MORECORE_CONTIGUOUS 1
   1.583 +#endif  /* MORECORE_CONTIGUOUS */
   1.584 +#endif  /* HAVE_MORECORE */
   1.585 +#ifndef DEFAULT_GRANULARITY
   1.586 +#if MORECORE_CONTIGUOUS
   1.587 +#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
   1.588 +#else   /* MORECORE_CONTIGUOUS */
   1.589 +#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
   1.590 +#endif  /* MORECORE_CONTIGUOUS */
   1.591 +#endif  /* DEFAULT_GRANULARITY */
   1.592 +#ifndef DEFAULT_TRIM_THRESHOLD
   1.593 +#ifndef MORECORE_CANNOT_TRIM
   1.594 +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
   1.595 +#else   /* MORECORE_CANNOT_TRIM */
   1.596 +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
   1.597 +#endif  /* MORECORE_CANNOT_TRIM */
   1.598 +#endif  /* DEFAULT_TRIM_THRESHOLD */
   1.599 +#ifndef DEFAULT_MMAP_THRESHOLD
   1.600 +#if HAVE_MMAP
   1.601 +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
   1.602 +#else   /* HAVE_MMAP */
   1.603 +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
   1.604 +#endif  /* HAVE_MMAP */
   1.605 +#endif  /* DEFAULT_MMAP_THRESHOLD */
   1.606 +#ifndef USE_BUILTIN_FFS
   1.607 +#define USE_BUILTIN_FFS 0
   1.608 +#endif  /* USE_BUILTIN_FFS */
   1.609 +#ifndef USE_DEV_RANDOM
   1.610 +#define USE_DEV_RANDOM 0
   1.611 +#endif  /* USE_DEV_RANDOM */
   1.612 +#ifndef NO_MALLINFO
   1.613 +#define NO_MALLINFO 0
   1.614 +#endif  /* NO_MALLINFO */
   1.615 +#ifndef MALLINFO_FIELD_TYPE
   1.616 +#define MALLINFO_FIELD_TYPE size_t
   1.617 +#endif  /* MALLINFO_FIELD_TYPE */
   1.618 +
   1.619 +/*
   1.620 +  mallopt tuning options.  SVID/XPG defines four standard parameter
   1.621 +  numbers for mallopt, normally defined in malloc.h.  None of these
   1.622 +  are used in this malloc, so setting them has no effect. But this
   1.623 +  malloc does support the following options.
   1.624 +*/
   1.625 +
   1.626 +#define M_TRIM_THRESHOLD     (-1)
   1.627 +#define M_GRANULARITY        (-2)
   1.628 +#define M_MMAP_THRESHOLD     (-3)
   1.629 +
   1.630 +/* ------------------------ Mallinfo declarations ------------------------ */
   1.631 +
   1.632 +#if !NO_MALLINFO
   1.633 +/*
   1.634 +  This version of malloc supports the standard SVID/XPG mallinfo
   1.635 +  routine that returns a struct containing usage properties and
   1.636 +  statistics. It should work on any system that has a
   1.637 +  /usr/include/malloc.h defining struct mallinfo.  The main
   1.638 +  declaration needed is the mallinfo struct that is returned (by-copy)
    1.639 +  by mallinfo().  The mallinfo struct contains a bunch of fields that
    1.640 +  are not even meaningful in this version of malloc.  These fields
    1.641 +  are instead filled by mallinfo() with other numbers that might be of
   1.642 +  interest.
   1.643 +
   1.644 +  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
   1.645 +  /usr/include/malloc.h file that includes a declaration of struct
   1.646 +  mallinfo.  If so, it is included; else a compliant version is
   1.647 +  declared below.  These must be precisely the same for mallinfo() to
   1.648 +  work.  The original SVID version of this struct, defined on most
   1.649 +  systems with mallinfo, declares all fields as ints. But some others
    1.650 +  define them as unsigned long. If your system defines the fields using a
   1.651 +  type of different width than listed here, you MUST #include your
   1.652 +  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
   1.653 +*/
   1.654 +
   1.655 +/* #define HAVE_USR_INCLUDE_MALLOC_H */
   1.656 +
   1.657 +#ifdef HAVE_USR_INCLUDE_MALLOC_H
   1.658 +#include "/usr/include/malloc.h"
   1.659 +#else /* HAVE_USR_INCLUDE_MALLOC_H */
   1.660 +
   1.661 +struct mallinfo {
   1.662 +  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
   1.663 +  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
   1.664 +  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
   1.665 +  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
   1.666 +  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
   1.667 +  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
   1.668 +  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
   1.669 +  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
   1.670 +  MALLINFO_FIELD_TYPE fordblks; /* total free space */
   1.671 +  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
   1.672 +};
   1.673 +
   1.674 +#endif /* HAVE_USR_INCLUDE_MALLOC_H */
   1.675 +#endif /* NO_MALLINFO */
   1.676 +
   1.677 +#ifdef __cplusplus
   1.678 +extern "C" {
   1.679 +#endif /* __cplusplus */
   1.680 +
   1.681 +#if !ONLY_MSPACES
   1.682 +
   1.683 +/* ------------------- Declarations of public routines ------------------- */
   1.684 +
   1.685 +#ifndef USE_DL_PREFIX
   1.686 +#define dlcalloc               calloc
   1.687 +#define dlfree                 free
   1.688 +#define dlmalloc               malloc
   1.689 +#define dlmemalign             memalign
   1.690 +#define dlrealloc              realloc
   1.691 +#define dlvalloc               valloc
   1.692 +#define dlpvalloc              pvalloc
   1.693 +#define dlmallinfo             mallinfo
   1.694 +#define dlmallopt              mallopt
   1.695 +#define dlmalloc_trim          malloc_trim
   1.696 +#define dlmalloc_stats         malloc_stats
   1.697 +#define dlmalloc_usable_size   malloc_usable_size
   1.698 +#define dlmalloc_footprint     malloc_footprint
   1.699 +#define dlmalloc_max_footprint malloc_max_footprint
   1.700 +#define dlindependent_calloc   independent_calloc
   1.701 +#define dlindependent_comalloc independent_comalloc
   1.702 +#endif /* USE_DL_PREFIX */
   1.703 +
   1.704 +
   1.705 +/*
   1.706 +  malloc(size_t n)
   1.707 +  Returns a pointer to a newly allocated chunk of at least n bytes, or
   1.708 +  null if no space is available, in which case errno is set to ENOMEM
   1.709 +  on ANSI C systems.
   1.710 +
   1.711 +  If n is zero, malloc returns a minimum-sized chunk. (The minimum
   1.712 +  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
   1.713 +  systems.)  Note that size_t is an unsigned type, so calls with
   1.714 +  arguments that would be negative if signed are interpreted as
   1.715 +  requests for huge amounts of space, which will often fail. The
   1.716 +  maximum supported value of n differs across systems, but is in all
   1.717 +  cases less than the maximum representable value of a size_t.
   1.718 +*/
   1.719 +void* dlmalloc(size_t);
   1.720 +
   1.721 +/*
   1.722 +  free(void* p)
   1.723 +  Releases the chunk of memory pointed to by p, that had been previously
   1.724 +  allocated using malloc or a related routine such as realloc.
   1.725 +  It has no effect if p is null. If p was not malloced or already
   1.726 +  freed, free(p) will by default cause the current program to abort.
   1.727 +*/
   1.728 +void  dlfree(void*);
   1.729 +
   1.730 +/*
   1.731 +  calloc(size_t n_elements, size_t element_size);
   1.732 +  Returns a pointer to n_elements * element_size bytes, with all locations
   1.733 +  set to zero.
   1.734 +*/
   1.735 +void* dlcalloc(size_t, size_t);
   1.736 +
   1.737 +/*
   1.738 +  realloc(void* p, size_t n)
   1.739 +  Returns a pointer to a chunk of size n that contains the same data
   1.740 +  as does chunk p up to the minimum of (n, p's size) bytes, or null
   1.741 +  if no space is available.
   1.742 +
   1.743 +  The returned pointer may or may not be the same as p. The algorithm
   1.744 +  prefers extending p in most cases when possible, otherwise it
   1.745 +  employs the equivalent of a malloc-copy-free sequence.
   1.746 +
   1.747 +  If p is null, realloc is equivalent to malloc.
   1.748 +
   1.749 +  If space is not available, realloc returns null, errno is set (if on
   1.750 +  ANSI) and p is NOT freed.
   1.751 +
    1.752 +  If n is for fewer bytes than already held by p, the newly unused
   1.753 +  space is lopped off and freed if possible.  realloc with a size
   1.754 +  argument of zero (re)allocates a minimum-sized chunk.
   1.755 +
   1.756 +  The old unix realloc convention of allowing the last-free'd chunk
   1.757 +  to be used as an argument to realloc is not supported.
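          +
          +  Since a failed realloc leaves p untouched, a common usage sketch
          +  (with handle_oom standing in for whatever error handling you use)
          +  is to assign the result to a temporary first:
          +    void* q = realloc(p, n);
          +    if (q != 0)
          +      p = q;          // success; the old pointer is superseded
          +    else
          +      handle_oom(p);  // failure; p is still valid and still owned here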
   1.758 +*/
   1.759 +
   1.760 +void* dlrealloc(void*, size_t);
   1.761 +
   1.762 +/*
   1.763 +  memalign(size_t alignment, size_t n);
   1.764 +  Returns a pointer to a newly allocated chunk of n bytes, aligned
   1.765 +  in accord with the alignment argument.
   1.766 +
   1.767 +  The alignment argument should be a power of two. If the argument is
   1.768 +  not a power of two, the nearest greater power is used.
   1.769 +  8-byte alignment is guaranteed by normal malloc calls, so don't
   1.770 +  bother calling memalign with an argument of 8 or less.
   1.771 +
   1.772 +  Overreliance on memalign is a sure way to fragment space.
   1.773 +*/
   1.774 +void* dlmemalign(size_t, size_t);
   1.775 +
   1.776 +/*
   1.777 +  valloc(size_t n);
   1.778 +  Equivalent to memalign(pagesize, n), where pagesize is the page
   1.779 +  size of the system. If the pagesize is unknown, 4096 is used.
   1.780 +*/
   1.781 +void* dlvalloc(size_t);
   1.782 +
   1.783 +/*
   1.784 +  mallopt(int parameter_number, int parameter_value)
    1.785 +  Sets tunable parameters. The format is to provide a
   1.786 +  (parameter-number, parameter-value) pair.  mallopt then sets the
   1.787 +  corresponding parameter to the argument value if it can (i.e., so
   1.788 +  long as the value is meaningful), and returns 1 if successful else
   1.789 +  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
    1.790 +  normally defined in malloc.h.  None of these are used in this malloc,
   1.791 +  so setting them has no effect. But this malloc also supports other
   1.792 +  options in mallopt. See below for details.  Briefly, supported
   1.793 +  parameters are as follows (listed defaults are for "typical"
   1.794 +  configurations).
   1.795 +
   1.796 +  Symbol            param #  default    allowed param values
   1.797 +  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
   1.798 +  M_GRANULARITY        -2     page size   any power of 2 >= page size
   1.799 +  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
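          +
          +  For example, a sketch that raises the mmap threshold to 1MB:
          +    int ok = mallopt(M_MMAP_THRESHOLD, 1024*1024);
          +    // ok is 1 if the new value was accepted, else 0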
   1.800 +*/
   1.801 +int dlmallopt(int, int);
   1.802 +
   1.803 +/*
   1.804 +  malloc_footprint();
   1.805 +  Returns the number of bytes obtained from the system.  The total
   1.806 +  number of bytes allocated by malloc, realloc etc., is less than this
   1.807 +  value. Unlike mallinfo, this function returns only a precomputed
   1.808 +  result, so can be called frequently to monitor memory consumption.
   1.809 +  Even if locks are otherwise defined, this function does not use them,
   1.810 +  so results might not be up to date.
   1.811 +*/
   1.812 +size_t dlmalloc_footprint(void);
   1.813 +
   1.814 +/*
   1.815 +  malloc_max_footprint();
   1.816 +  Returns the maximum number of bytes obtained from the system. This
   1.817 +  value will be greater than current footprint if deallocated space
   1.818 +  has been reclaimed by the system. The peak number of bytes allocated
   1.819 +  by malloc, realloc etc., is less than this value. Unlike mallinfo,
   1.820 +  this function returns only a precomputed result, so can be called
   1.821 +  frequently to monitor memory consumption.  Even if locks are
   1.822 +  otherwise defined, this function does not use them, so results might
   1.823 +  not be up to date.
   1.824 +*/
   1.825 +size_t dlmalloc_max_footprint(void);
   1.826 +
   1.827 +#if !NO_MALLINFO
   1.828 +/*
   1.829 +  mallinfo()
   1.830 +  Returns (by copy) a struct containing various summary statistics:
   1.831 +
   1.832 +  arena:     current total non-mmapped bytes allocated from system
   1.833 +  ordblks:   the number of free chunks
   1.834 +  smblks:    always zero.
   1.835 +  hblks:     current number of mmapped regions
   1.836 +  hblkhd:    total bytes held in mmapped regions
   1.837 +  usmblks:   the maximum total allocated space. This will be greater
   1.838 +                than current total if trimming has occurred.
   1.839 +  fsmblks:   always zero
   1.840 +  uordblks:  current total allocated space (normal or mmapped)
   1.841 +  fordblks:  total free space
   1.842 +  keepcost:  the maximum number of bytes that could ideally be released
   1.843 +               back to system via malloc_trim. ("ideally" means that
   1.844 +               it ignores page restrictions etc.)
   1.845 +
   1.846 +  Because these fields are ints, but internal bookkeeping may
   1.847 +  be kept as longs, the reported values may wrap around zero and
   1.848 +  thus be inaccurate.
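          +
          +  A small usage sketch reading two of the fields:
          +    struct mallinfo mi = mallinfo();
          +    size_t in_use  = (size_t)mi.uordblks;  // current total allocated space
          +    size_t total_f = (size_t)mi.fordblks;  // total free space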
   1.849 +*/
   1.850 +struct mallinfo dlmallinfo(void);
   1.851 +#endif /* NO_MALLINFO */
   1.852 +
   1.853 +/*
   1.854 +  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
   1.855 +
   1.856 +  independent_calloc is similar to calloc, but instead of returning a
   1.857 +  single cleared space, it returns an array of pointers to n_elements
   1.858 +  independent elements that can hold contents of size elem_size, each
   1.859 +  of which starts out cleared, and can be independently freed,
   1.860 +  realloc'ed etc. The elements are guaranteed to be adjacently
   1.861 +  allocated (this is not guaranteed to occur with multiple callocs or
   1.862 +  mallocs), which may also improve cache locality in some
   1.863 +  applications.
   1.864 +
   1.865 +  The "chunks" argument is optional (i.e., may be null, which is
   1.866 +  probably the most typical usage). If it is null, the returned array
   1.867 +  is itself dynamically allocated and should also be freed when it is
   1.868 +  no longer needed. Otherwise, the chunks array must be of at least
   1.869 +  n_elements in length. It is filled in with the pointers to the
   1.870 +  chunks.
   1.871 +
   1.872 +  In either case, independent_calloc returns this pointer array, or
   1.873 +  null if the allocation failed.  If n_elements is zero and "chunks"
   1.874 +  is null, it returns a chunk representing an array with zero elements
   1.875 +  (which should be freed if not wanted).
   1.876 +
   1.877 +  Each element must be individually freed when it is no longer
   1.878 +  needed. If you'd like to instead be able to free all at once, you
   1.879 +  should instead use regular calloc and assign pointers into this
   1.880 +  space to represent elements.  (In this case though, you cannot
   1.881 +  independently free elements.)
   1.882 +
   1.883 +  independent_calloc simplifies and speeds up implementations of many
   1.884 +  kinds of pools.  It may also be useful when constructing large data
   1.885 +  structures that initially have a fixed number of fixed-sized nodes,
   1.886 +  but the number is not known at compile time, and some of the nodes
   1.887 +  may later need to be freed. For example:
   1.888 +
   1.889 +  struct Node { int item; struct Node* next; };
   1.890 +
   1.891 +  struct Node* build_list() {
   1.892 +    struct Node** pool;
   1.893 +    int n = read_number_of_nodes_needed();
   1.894 +    if (n <= 0) return 0;
    1.895 +    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
   1.896 +    if (pool == 0) die();
   1.897 +    // organize into a linked list...
   1.898 +    struct Node* first = pool[0];
    1.899 +    for (int i = 0; i < n-1; ++i)
   1.900 +      pool[i]->next = pool[i+1];
   1.901 +    free(pool);     // Can now free the array (or not, if it is needed later)
   1.902 +    return first;
   1.903 +  }
   1.904 +*/
   1.905 +void** dlindependent_calloc(size_t, size_t, void**);
   1.906 +
   1.907 +/*
   1.908 +  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   1.909 +
   1.910 +  independent_comalloc allocates, all at once, a set of n_elements
   1.911 +  chunks with sizes indicated in the "sizes" array.    It returns
   1.912 +  an array of pointers to these elements, each of which can be
   1.913 +  independently freed, realloc'ed etc. The elements are guaranteed to
   1.914 +  be adjacently allocated (this is not guaranteed to occur with
   1.915 +  multiple callocs or mallocs), which may also improve cache locality
   1.916 +  in some applications.
   1.917 +
   1.918 +  The "chunks" argument is optional (i.e., may be null). If it is null
   1.919 +  the returned array is itself dynamically allocated and should also
   1.920 +  be freed when it is no longer needed. Otherwise, the chunks array
   1.921 +  must be of at least n_elements in length. It is filled in with the
   1.922 +  pointers to the chunks.
   1.923 +
   1.924 +  In either case, independent_comalloc returns this pointer array, or
   1.925 +  null if the allocation failed.  If n_elements is zero and chunks is
   1.926 +  null, it returns a chunk representing an array with zero elements
   1.927 +  (which should be freed if not wanted).
   1.928 +
   1.929 +  Each element must be individually freed when it is no longer
   1.930 +  needed. If you'd like to instead be able to free all at once, you
   1.931 +  should instead use a single regular malloc, and assign pointers at
   1.932 +  particular offsets in the aggregate space. (In this case though, you
   1.933 +  cannot independently free elements.)
   1.934 +
    1.935 +  independent_comalloc differs from independent_calloc in that each
   1.936 +  element may have a different size, and also that it does not
   1.937 +  automatically clear elements.
   1.938 +
   1.939 +  independent_comalloc can be used to speed up allocation in cases
   1.940 +  where several structs or objects must always be allocated at the
   1.941 +  same time.  For example:
   1.942 +
    1.943 +  struct Head { ... };
    1.944 +  struct Foot { ... };
   1.945 +
   1.946 +  void send_message(char* msg) {
   1.947 +    int msglen = strlen(msg);
   1.948 +    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
   1.949 +    void* chunks[3];
   1.950 +    if (independent_comalloc(3, sizes, chunks) == 0)
   1.951 +      die();
   1.952 +    struct Head* head = (struct Head*)(chunks[0]);
   1.953 +    char*        body = (char*)(chunks[1]);
   1.954 +    struct Foot* foot = (struct Foot*)(chunks[2]);
   1.955 +    // ...
   1.956 +  }
   1.957 +
   1.958 +  In general though, independent_comalloc is worth using only for
   1.959 +  larger values of n_elements. For small values, you probably won't
    1.960 +  detect enough difference from a series of malloc calls to bother.
   1.961 +
   1.962 +  Overuse of independent_comalloc can increase overall memory usage,
   1.963 +  since it cannot reuse existing noncontiguous small chunks that
   1.964 +  might be available for some of the elements.
   1.965 +*/
   1.966 +void** dlindependent_comalloc(size_t, size_t*, void**);
   1.967 +
   1.968 +
   1.969 +/*
   1.970 +  pvalloc(size_t n);
   1.971 +  Equivalent to valloc(minimum-page-that-holds(n)), that is,
   1.972 +  round up n to nearest pagesize.
   1.973 + */
   1.974 +void*  dlpvalloc(size_t);
   1.975 +
   1.976 +/*
   1.977 +  malloc_trim(size_t pad);
   1.978 +
   1.979 +  If possible, gives memory back to the system (via negative arguments
   1.980 +  to sbrk) if there is unused memory at the `high' end of the malloc
   1.981 +  pool or in unused MMAP segments. You can call this after freeing
   1.982 +  large blocks of memory to potentially reduce the system-level memory
   1.983 +  requirements of a program. However, it cannot guarantee to reduce
   1.984 +  memory. Under some allocation patterns, some large free blocks of
   1.985 +  memory will be locked between two used chunks, so they cannot be
   1.986 +  given back to the system.
   1.987 +
   1.988 +  The `pad' argument to malloc_trim represents the amount of free
   1.989 +  trailing space to leave untrimmed. If this argument is zero, only
   1.990 +  the minimum amount of memory to maintain internal data structures
   1.991 +  will be left. Non-zero arguments can be supplied to maintain enough
   1.992 +  trailing space to service future expected allocations without having
   1.993 +  to re-obtain memory from the system.
   1.994 +
   1.995 +  Malloc_trim returns 1 if it actually released any memory, else 0.
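          +
          +  A small usage sketch, keeping roughly 64K of trailing slack:
          +    int released = malloc_trim((size_t)64 * 1024);
          +    // released is 1 if any memory was actually given back, else 0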
   1.996 +*/
   1.997 +int  dlmalloc_trim(size_t);
   1.998 +
   1.999 +/*
  1.1000 +  malloc_usable_size(void* p);
  1.1001 +
  1.1002 +  Returns the number of bytes you can actually use in
  1.1003 +  an allocated chunk, which may be more than you requested (although
  1.1004 +  often not) due to alignment and minimum size constraints.
  1.1005 +  You can use this many bytes without worrying about
  1.1006 +  overwriting other allocated objects. This is not a particularly great
  1.1007 +  programming practice. malloc_usable_size can be more useful in
  1.1008 +  debugging and assertions, for example:
  1.1009 +
  1.1010 +  p = malloc(n);
  1.1011 +  assert(malloc_usable_size(p) >= 256);
  1.1012 +*/
  1.1013 +size_t dlmalloc_usable_size(void*);
  1.1014 +
  1.1015 +/*
  1.1016 +  malloc_stats();
  1.1017 +  Prints on stderr the amount of space obtained from the system (both
  1.1018 +  via sbrk and mmap), the maximum amount (which may be more than
  1.1019 +  current if malloc_trim and/or munmap got called), and the current
  1.1020 +  number of bytes allocated via malloc (or realloc, etc) but not yet
  1.1021 +  freed. Note that this is the number of bytes allocated, not the
  1.1022 +  number requested. It will be larger than the number requested
  1.1023 +  because of alignment and bookkeeping overhead. Because it includes
  1.1024 +  alignment wastage as being in use, this figure may be greater than
  1.1025 +  zero even when no user-level chunks are allocated.
  1.1026 +
  1.1027 +  The reported current and maximum system memory can be inaccurate if
  1.1028 +  a program makes other calls to system memory allocation functions
  1.1029 +  (normally sbrk) outside of malloc.
  1.1030 +
  1.1031 +  malloc_stats prints only the most commonly interesting statistics.
  1.1032 +  More information can be obtained by calling mallinfo.
  1.1033 +*/
  1.1034 +void  dlmalloc_stats(void);
  1.1035 +
  1.1036 +#endif /* ONLY_MSPACES */
  1.1037 +
  1.1038 +#if MSPACES
  1.1039 +
  1.1040 +/*
  1.1041 +  mspace is an opaque type representing an independent
  1.1042 +  region of space that supports mspace_malloc, etc.
  1.1043 +*/
  1.1044 +typedef void* mspace;
  1.1045 +
  1.1046 +/*
  1.1047 +  create_mspace creates and returns a new independent space with the
  1.1048 +  given initial capacity, or, if 0, the default granularity size.  It
  1.1049 +  returns null if there is no system memory available to create the
  1.1050 +  space.  If argument locked is non-zero, the space uses a separate
  1.1051 +  lock to control access. The capacity of the space will grow
  1.1052 +  dynamically as needed to service mspace_malloc requests.  You can
  1.1053 +  control the sizes of incremental increases of this space by
  1.1054 +  compiling with a different DEFAULT_GRANULARITY or dynamically
  1.1055 +  setting with mallopt(M_GRANULARITY, value).
  1.1056 +*/
  1.1057 +mspace create_mspace(size_t capacity, int locked);
  1.1058 +
  1.1059 +/*
  1.1060 +  destroy_mspace destroys the given space, and attempts to return all
  1.1061 +  of its memory back to the system, returning the total number of
  1.1062 +  bytes freed. After destruction, the results of access to all memory
  1.1063 +  used by the space become undefined.
  1.1064 +*/
  1.1065 +size_t destroy_mspace(mspace msp);
  1.1066 +
  1.1067 +/*
  1.1068 +  create_mspace_with_base uses the memory supplied as the initial base
  1.1069 +  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  1.1070 +  space is used for bookkeeping, so the capacity must be at least this
  1.1071 +  large. (Otherwise 0 is returned.) When this initial space is
  1.1072 +  exhausted, additional memory will be obtained from the system.
  1.1073 +  Destroying this space will deallocate all additionally allocated
  1.1074 +  space (if possible) but not the initial base.
  1.1075 +*/
  1.1076 +mspace create_mspace_with_base(void* base, size_t capacity, int locked);
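         +
         +/*
         +  Illustrative sketch (not part of dlmalloc): using a static buffer as
         +  the initial base of an mspace.  The buffer name and size here are
         +  arbitrary examples; the capacity only needs to exceed the
         +  bookkeeping overhead described above.
         +
         +    static char arena[64 * 1024];
         +    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
         +    if (ms != 0) {
         +      ... allocate with the mspace_ functions declared below ...
         +      destroy_mspace(ms);
         +    }
         +
         +  destroy_mspace releases any additionally obtained segments, but the
         +  initial arena itself is left untouched, as noted above.
         +*/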
  1.1077 +
  1.1078 +/*
  1.1079 +  mspace_malloc behaves as malloc, but operates within
  1.1080 +  the given space.
  1.1081 +*/
  1.1082 +void* mspace_malloc(mspace msp, size_t bytes);
  1.1083 +
  1.1084 +/*
  1.1085 +  mspace_free behaves as free, but operates within
  1.1086 +  the given space.
  1.1087 +
  1.1088 +  If compiled with FOOTERS==1, mspace_free is not actually needed.
  1.1089 +  free may be called instead of mspace_free because freed chunks from
  1.1090 +  any space are handled by their originating spaces.
  1.1091 +*/
  1.1092 +void mspace_free(mspace msp, void* mem);
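         +
         +/*
         +  A minimal usage sketch (illustrative only, assuming MSPACES is
         +  enabled): a private heap is created, used, and destroyed.  The
         +  request size of 100 is arbitrary.
         +
         +    mspace ms = create_mspace(0, 0);     default capacity, no locking
         +    if (ms != 0) {
         +      void* p = mspace_malloc(ms, 100);
         +      if (p != 0)
         +        mspace_free(ms, p);
         +      destroy_mspace(ms);                attempts to return all memory
         +    }
         +*/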
  1.1093 +
  1.1094 +/*
  1.1095 +  mspace_realloc behaves as realloc, but operates within
  1.1096 +  the given space.
  1.1097 +
  1.1098 +  If compiled with FOOTERS==1, mspace_realloc is not actually
  1.1099 +  needed.  realloc may be called instead of mspace_realloc because
  1.1100 +  realloced chunks from any space are handled by their originating
  1.1101 +  spaces.
  1.1102 +*/
  1.1103 +void* mspace_realloc(mspace msp, void* mem, size_t newsize);
  1.1104 +
  1.1105 +/*
  1.1106 +  mspace_calloc behaves as calloc, but operates within
  1.1107 +  the given space.
  1.1108 +*/
  1.1109 +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  1.1110 +
  1.1111 +/*
  1.1112 +  mspace_memalign behaves as memalign, but operates within
  1.1113 +  the given space.
  1.1114 +*/
  1.1115 +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  1.1116 +
  1.1117 +/*
  1.1118 +  mspace_independent_calloc behaves as independent_calloc, but
  1.1119 +  operates within the given space.
  1.1120 +*/
  1.1121 +void** mspace_independent_calloc(mspace msp, size_t n_elements,
  1.1122 +                                 size_t elem_size, void* chunks[]);
  1.1123 +
  1.1124 +/*
  1.1125 +  mspace_independent_comalloc behaves as independent_comalloc, but
  1.1126 +  operates within the given space.
  1.1127 +*/
  1.1128 +void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  1.1129 +                                   size_t sizes[], void* chunks[]);
  1.1130 +
  1.1131 +/*
  1.1132 +  mspace_footprint() returns the number of bytes obtained from the
  1.1133 +  system for this space.
  1.1134 +*/
  1.1135 +size_t mspace_footprint(mspace msp);
  1.1136 +
  1.1137 +/*
  1.1138 +  mspace_max_footprint() returns the peak number of bytes obtained from the
  1.1139 +  system for this space.
  1.1140 +*/
  1.1141 +size_t mspace_max_footprint(mspace msp);
  1.1142 +
  1.1143 +
  1.1144 +#if !NO_MALLINFO
  1.1145 +/*
  1.1146 +  mspace_mallinfo behaves as mallinfo, but reports properties of
  1.1147 +  the given space.
  1.1148 +*/
  1.1149 +struct mallinfo mspace_mallinfo(mspace msp);
  1.1150 +#endif /* NO_MALLINFO */
  1.1151 +
  1.1152 +/*
  1.1153 +  mspace_malloc_stats behaves as malloc_stats, but reports
  1.1154 +  properties of the given space.
  1.1155 +*/
  1.1156 +void mspace_malloc_stats(mspace msp);
  1.1157 +
  1.1158 +/*
  1.1159 +  mspace_trim behaves as malloc_trim, but
  1.1160 +  operates within the given space.
  1.1161 +*/
  1.1162 +int mspace_trim(mspace msp, size_t pad);
  1.1163 +
  1.1164 +/*
  1.1165 +  An alias for mallopt.
  1.1166 +*/
  1.1167 +int mspace_mallopt(int, int);
  1.1168 +
  1.1169 +#endif /* MSPACES */
  1.1170 +
  1.1171 +#ifdef __cplusplus
  1.1172 +};  /* end of extern "C" */
  1.1173 +#endif /* __cplusplus */
  1.1174 +
  1.1175 +/*
  1.1176 +  ========================================================================
  1.1177 +  To make a fully customizable malloc.h header file, cut everything
  1.1178 +  above this line, put into file malloc.h, edit to suit, and #include it
  1.1179 +  on the next line, as well as in programs that use this malloc.
  1.1180 +  ========================================================================
  1.1181 +*/
  1.1182 +
  1.1183 +/* #include "malloc.h" */
  1.1184 +
  1.1185 +/*------------------------------ internal #includes ---------------------- */
  1.1186 +
  1.1187 +#ifdef _MSC_VER
  1.1188 +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
  1.1189 +#endif /* _MSC_VER */
  1.1190 +
  1.1191 +#ifndef LACKS_STDIO_H
  1.1192 +#include <stdio.h>       /* for printing in malloc_stats */
  1.1193 +#endif
  1.1194 +
  1.1195 +#ifndef LACKS_ERRNO_H
  1.1196 +#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
  1.1197 +#endif /* LACKS_ERRNO_H */
  1.1198 +#if FOOTERS
  1.1199 +#include <time.h>        /* for magic initialization */
  1.1200 +#endif /* FOOTERS */
  1.1201 +#ifndef LACKS_STDLIB_H
  1.1202 +#include <stdlib.h>      /* for abort() */
  1.1203 +#endif /* LACKS_STDLIB_H */
  1.1204 +#ifdef DEBUG
  1.1205 +#if ABORT_ON_ASSERT_FAILURE
  1.1206 +#define assert(x) if(!(x)) ABORT
  1.1207 +#else /* ABORT_ON_ASSERT_FAILURE */
  1.1208 +#include <assert.h>
  1.1209 +#endif /* ABORT_ON_ASSERT_FAILURE */
  1.1210 +#else  /* DEBUG */
  1.1211 +#define assert(x)
  1.1212 +#endif /* DEBUG */
  1.1213 +#ifndef LACKS_STRING_H
  1.1214 +#include <string.h>      /* for memset etc */
  1.1215 +#endif  /* LACKS_STRING_H */
  1.1216 +#if USE_BUILTIN_FFS
  1.1217 +#ifndef LACKS_STRINGS_H
  1.1218 +#include <strings.h>     /* for ffs */
  1.1219 +#endif /* LACKS_STRINGS_H */
  1.1220 +#endif /* USE_BUILTIN_FFS */
  1.1221 +#if HAVE_MMAP
  1.1222 +#ifndef LACKS_SYS_MMAN_H
  1.1223 +#include <sys/mman.h>    /* for mmap */
  1.1224 +#endif /* LACKS_SYS_MMAN_H */
  1.1225 +#ifndef LACKS_FCNTL_H
  1.1226 +#include <fcntl.h>
  1.1227 +#endif /* LACKS_FCNTL_H */
  1.1228 +#endif /* HAVE_MMAP */
  1.1229 +#if HAVE_MORECORE
  1.1230 +#ifndef LACKS_UNISTD_H
  1.1231 +#include <unistd.h>     /* for sbrk */
  1.1232 +#else /* LACKS_UNISTD_H */
  1.1233 +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  1.1234 +extern void*     sbrk(ptrdiff_t);
  1.1235 +#endif /* FreeBSD etc */
  1.1236 +#endif /* LACKS_UNISTD_H */
  1.1237 +#endif /* HAVE_MORECORE */
  1.1238 +
  1.1239 +#ifndef WIN32
  1.1240 +#ifndef malloc_getpagesize
  1.1241 +#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
  1.1242 +#    ifndef _SC_PAGE_SIZE
  1.1243 +#      define _SC_PAGE_SIZE _SC_PAGESIZE
  1.1244 +#    endif
  1.1245 +#  endif
  1.1246 +#  ifdef _SC_PAGE_SIZE
  1.1247 +#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  1.1248 +#  else
  1.1249 +#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  1.1250 +       extern size_t getpagesize();
  1.1251 +#      define malloc_getpagesize getpagesize()
  1.1252 +#    else
  1.1253 +#      ifdef WIN32 /* use supplied emulation of getpagesize */
  1.1254 +#        define malloc_getpagesize getpagesize()
  1.1255 +#      else
  1.1256 +#        ifndef LACKS_SYS_PARAM_H
  1.1257 +#          include <sys/param.h>
  1.1258 +#        endif
  1.1259 +#        ifdef EXEC_PAGESIZE
  1.1260 +#          define malloc_getpagesize EXEC_PAGESIZE
  1.1261 +#        else
  1.1262 +#          ifdef NBPG
  1.1263 +#            ifndef CLSIZE
  1.1264 +#              define malloc_getpagesize NBPG
  1.1265 +#            else
  1.1266 +#              define malloc_getpagesize (NBPG * CLSIZE)
  1.1267 +#            endif
  1.1268 +#          else
  1.1269 +#            ifdef NBPC
  1.1270 +#              define malloc_getpagesize NBPC
  1.1271 +#            else
  1.1272 +#              ifdef PAGESIZE
  1.1273 +#                define malloc_getpagesize PAGESIZE
  1.1274 +#              else /* just guess */
  1.1275 +#                define malloc_getpagesize ((size_t)4096U)
  1.1276 +#              endif
  1.1277 +#            endif
  1.1278 +#          endif
  1.1279 +#        endif
  1.1280 +#      endif
  1.1281 +#    endif
  1.1282 +#  endif
  1.1283 +#endif
  1.1284 +#endif
  1.1285 +
  1.1286 +/* ------------------- size_t and alignment properties -------------------- */
  1.1287 +
  1.1288 +/* The byte and bit size of a size_t */
  1.1289 +#define SIZE_T_SIZE         (sizeof(size_t))
  1.1290 +#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
  1.1291 +
  1.1292 +/* Some constants coerced to size_t */
  1.1293 +/* Annoying but necessary to avoid errors on some platforms */
  1.1294 +#define SIZE_T_ZERO         ((size_t)0)
  1.1295 +#define SIZE_T_ONE          ((size_t)1)
  1.1296 +#define SIZE_T_TWO          ((size_t)2)
  1.1297 +#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
  1.1298 +#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
  1.1299 +#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
  1.1300 +#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
  1.1301 +
  1.1302 +/* The bit mask value corresponding to MALLOC_ALIGNMENT */
  1.1303 +#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
  1.1304 +
  1.1305 +/* True if address a has acceptable alignment */
  1.1306 +#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
  1.1307 +
  1.1308 +/* the number of bytes to offset an address to align it */
  1.1309 +#define align_offset(A)\
  1.1310 + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  1.1311 +  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
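         +
         +/*
         +  Worked example (illustrative): if MALLOC_ALIGNMENT is 8, then
         +  CHUNK_ALIGN_MASK is 7, so
         +
         +    align_offset((void*)0x1004) == (8 - (0x1004 & 7)) & 7 == 4
         +
         +  and 0x1004 + 4 == 0x1008 is aligned, while
         +
         +    align_offset((void*)0x1008) == 0
         +
         +  since that address is already a multiple of 8.
         +*/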
  1.1312 +
  1.1313 +/* -------------------------- MMAP preliminaries ------------------------- */
  1.1314 +
  1.1315 +/*
  1.1316 +   If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
  1.1317 +   checks to fail so that the compiler's optimizer can delete the dead
  1.1318 +   code rather than relying on so many "#if"s.
  1.1319 +*/
  1.1320 +
  1.1321 +
  1.1322 +/* MORECORE and MMAP must return MFAIL on failure */
  1.1323 +#define MFAIL                ((void*)(MAX_SIZE_T))
  1.1324 +#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
  1.1325 +
  1.1326 +#if !HAVE_MMAP
  1.1327 +#define IS_MMAPPED_BIT       (SIZE_T_ZERO)
  1.1328 +#define USE_MMAP_BIT         (SIZE_T_ZERO)
  1.1329 +#define CALL_MMAP(s)         MFAIL
  1.1330 +#define CALL_MUNMAP(a, s)    (-1)
  1.1331 +#define DIRECT_MMAP(s)       MFAIL
  1.1332 +
  1.1333 +#else /* HAVE_MMAP */
  1.1334 +#define IS_MMAPPED_BIT       (SIZE_T_ONE)
  1.1335 +#define USE_MMAP_BIT         (SIZE_T_ONE)
  1.1336 +
  1.1337 +#ifndef WIN32
  1.1338 +#define CALL_MUNMAP(a, s)    munmap((a), (s))
  1.1339 +#define MMAP_PROT            (PROT_READ|PROT_WRITE)
  1.1340 +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
  1.1341 +#define MAP_ANONYMOUS        MAP_ANON
  1.1342 +#endif /* MAP_ANON */
  1.1343 +#ifdef MAP_ANONYMOUS
  1.1344 +#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
  1.1345 +#define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
  1.1346 +#else /* MAP_ANONYMOUS */
  1.1347 +/*
  1.1348 +   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  1.1349 +   is unlikely to be needed, but is supplied just in case.
  1.1350 +*/
  1.1351 +#define MMAP_FLAGS           (MAP_PRIVATE)
  1.1352 +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  1.1353 +#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  1.1354 +           (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1.1355 +            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  1.1356 +            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
  1.1357 +#endif /* MAP_ANONYMOUS */
  1.1358 +
  1.1359 +#define DIRECT_MMAP(s)       CALL_MMAP(s)
  1.1360 +#else /* WIN32 */
  1.1361 +
  1.1362 +/* Win32 MMAP via VirtualAlloc */
  1.1363 +static void* win32mmap(size_t size) {
  1.1364 +  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  1.1365 +  return (ptr != 0)? ptr: MFAIL;
  1.1366 +}
  1.1367 +
  1.1368 +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
  1.1369 +static void* win32direct_mmap(size_t size) {
  1.1370 +  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
  1.1371 +                           PAGE_READWRITE);
  1.1372 +  return (ptr != 0)? ptr: MFAIL;
  1.1373 +}
  1.1374 +
  1.1375 +/* This function supports releasing coalesced segments */
  1.1376 +static int win32munmap(void* ptr, size_t size) {
  1.1377 +  MEMORY_BASIC_INFORMATION minfo;
  1.1378 +  char* cptr = ptr;
  1.1379 +  while (size) {
  1.1380 +    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
  1.1381 +      return -1;
  1.1382 +    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
  1.1383 +        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
  1.1384 +      return -1;
  1.1385 +    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
  1.1386 +      return -1;
  1.1387 +    cptr += minfo.RegionSize;
  1.1388 +    size -= minfo.RegionSize;
  1.1389 +  }
  1.1390 +  return 0;
  1.1391 +}
  1.1392 +
  1.1393 +#define CALL_MMAP(s)         win32mmap(s)
  1.1394 +#define CALL_MUNMAP(a, s)    win32munmap((a), (s))
  1.1395 +#define DIRECT_MMAP(s)       win32direct_mmap(s)
  1.1396 +#endif /* WIN32 */
  1.1397 +#endif /* HAVE_MMAP */
  1.1398 +
  1.1399 +#if HAVE_MMAP && HAVE_MREMAP
  1.1400 +#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
  1.1401 +#else  /* HAVE_MMAP && HAVE_MREMAP */
  1.1402 +#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
  1.1403 +#endif /* HAVE_MMAP && HAVE_MREMAP */
  1.1404 +
  1.1405 +#if HAVE_MORECORE
  1.1406 +#define CALL_MORECORE(S)     MORECORE(S)
  1.1407 +#else  /* HAVE_MORECORE */
  1.1408 +#define CALL_MORECORE(S)     MFAIL
  1.1409 +#endif /* HAVE_MORECORE */
  1.1410 +
  1.1411 +/* mstate bit set if contiguous morecore disabled or failed */
  1.1412 +#define USE_NONCONTIGUOUS_BIT (4U)
  1.1413 +
  1.1414 +/* segment bit set in create_mspace_with_base */
  1.1415 +#define EXTERN_BIT            (8U)
  1.1416 +
  1.1417 +
  1.1418 +/* --------------------------- Lock preliminaries ------------------------ */
  1.1419 +
  1.1420 +#if USE_LOCKS
  1.1421 +
  1.1422 +/*
  1.1423 +  When locks are defined, there are up to two global locks:
  1.1424 +
  1.1425 +  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
  1.1426 +    MORECORE.  In many cases sys_alloc requires two calls, which should
  1.1427 +    not be interleaved with calls by other threads.  This does not
  1.1428 +    protect against direct calls to MORECORE by other threads that do
  1.1429 +    not use this lock, so there is still code to cope as well as we can
  1.1430 +    with such interference.
  1.1431 +
  1.1432 +  * magic_init_mutex ensures that mparams.magic and other
  1.1433 +    unique mparams values are initialized only once.
  1.1434 +*/
  1.1435 +
  1.1436 +#ifndef WIN32
  1.1437 +/* By default use posix locks */
  1.1438 +#include <pthread.h>
  1.1439 +#define MLOCK_T pthread_mutex_t
  1.1440 +#define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
  1.1441 +#define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
  1.1442 +#define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
  1.1443 +
  1.1444 +#if HAVE_MORECORE
  1.1445 +static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
  1.1446 +#endif /* HAVE_MORECORE */
  1.1447 +
  1.1448 +static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
  1.1449 +
  1.1450 +#else /* WIN32 */
  1.1451 +/*
  1.1452 +   Because lock-protected regions have bounded times, and there
  1.1453 +   are no recursive lock calls, we can use simple spinlocks.
  1.1454 +*/
  1.1455 +
  1.1456 +#define MLOCK_T long
  1.1457 +static int win32_acquire_lock (MLOCK_T *sl) {
  1.1458 +  for (;;) {
  1.1459 +#ifdef InterlockedCompareExchangePointer
  1.1460 +    if (!InterlockedCompareExchange(sl, 1, 0))
  1.1461 +      return 0;
  1.1462 +#else  /* Use older void* version */
  1.1463 +    if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
  1.1464 +      return 0;
  1.1465 +#endif /* InterlockedCompareExchangePointer */
  1.1466 +    Sleep (0);
  1.1467 +  }
  1.1468 +}
  1.1469 +
  1.1470 +static void win32_release_lock (MLOCK_T *sl) {
  1.1471 +  InterlockedExchange (sl, 0);
  1.1472 +}
  1.1473 +
  1.1474 +#define INITIAL_LOCK(l)      *(l)=0
  1.1475 +#define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
  1.1476 +#define RELEASE_LOCK(l)      win32_release_lock(l)
  1.1477 +#if HAVE_MORECORE
  1.1478 +static MLOCK_T morecore_mutex;
  1.1479 +#endif /* HAVE_MORECORE */
  1.1480 +static MLOCK_T magic_init_mutex;
  1.1481 +#endif /* WIN32 */
  1.1482 +
  1.1483 +#define USE_LOCK_BIT               (2U)
  1.1484 +#else  /* USE_LOCKS */
  1.1485 +#define USE_LOCK_BIT               (0U)
  1.1486 +#define INITIAL_LOCK(l)
  1.1487 +#endif /* USE_LOCKS */
  1.1488 +
  1.1489 +#if USE_LOCKS && HAVE_MORECORE
  1.1490 +#define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
  1.1491 +#define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
  1.1492 +#else /* USE_LOCKS && HAVE_MORECORE */
  1.1493 +#define ACQUIRE_MORECORE_LOCK()
  1.1494 +#define RELEASE_MORECORE_LOCK()
  1.1495 +#endif /* USE_LOCKS && HAVE_MORECORE */
  1.1496 +
  1.1497 +#if USE_LOCKS
  1.1498 +#define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
  1.1499 +#define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
  1.1500 +#else  /* USE_LOCKS */
  1.1501 +#define ACQUIRE_MAGIC_INIT_LOCK()
  1.1502 +#define RELEASE_MAGIC_INIT_LOCK()
  1.1503 +#endif /* USE_LOCKS */
  1.1504 +
  1.1505 +
  1.1506 +/* -----------------------  Chunk representations ------------------------ */
  1.1507 +
  1.1508 +/*
  1.1509 +  (The following includes lightly edited explanations by Colin Plumb.)
  1.1510 +
  1.1511 +  The malloc_chunk declaration below is misleading (but accurate and
  1.1512 +  necessary).  It declares a "view" into memory allowing access to
  1.1513 +  necessary fields at known offsets from a given base.
  1.1514 +
  1.1515 +  Chunks of memory are maintained using a `boundary tag' method as
  1.1516 +  originally described by Knuth.  (See the paper by Paul Wilson
  1.1517 +  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
  1.1518 +  techniques.)  Sizes of free chunks are stored both in the front of
  1.1519 +  each chunk and at the end.  This makes consolidating fragmented
  1.1520 +  chunks into bigger chunks fast.  The head fields also hold bits
  1.1521 +  representing whether chunks are free or in use.
  1.1522 +
  1.1523 +  Here are some pictures to make it clearer.  They are "exploded" to
  1.1524 +  show that the state of a chunk can be thought of as extending from
  1.1525 +  the high 31 bits of the head field of its header through the
  1.1526 +  prev_foot and PINUSE_BIT bit of the following chunk header.
  1.1527 +
  1.1528 +  A chunk that's in use looks like:
  1.1529 +
  1.1530 +   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1531 +           | Size of previous chunk (if P = 1)                             |
  1.1532 +           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1533 +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1.1534 +         | Size of this chunk                                         1| +-+
  1.1535 +   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1536 +         |                                                               |
  1.1537 +         +-                                                             -+
  1.1538 +         |                                                               |
  1.1539 +         +-                                                             -+
  1.1540 +         |                                                               :
  1.1541 +         +-      size - sizeof(size_t) available payload bytes          -+
  1.1542 +         :                                                               |
  1.1543 + chunk-> +-                                                             -+
  1.1544 +         |                                                               |
  1.1545 +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1546 +       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
  1.1547 +       | Size of next chunk (may or may not be in use)               | +-+
  1.1548 + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1549 +
  1.1550 +    And if it's free, it looks like this:
  1.1551 +
  1.1552 +   chunk-> +-                                                             -+
  1.1553 +           | User payload (must be in use, or we would have merged!)       |
  1.1554 +           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1555 +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
  1.1556 +         | Size of this chunk                                         0| +-+
  1.1557 +   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1558 +         | Next pointer                                                  |
  1.1559 +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1560 +         | Prev pointer                                                  |
  1.1561 +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1562 +         |                                                               :
  1.1563 +         +-      size - sizeof(struct chunk) unused bytes               -+
  1.1564 +         :                                                               |
  1.1565 + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1566 +         | Size of this chunk                                            |
  1.1567 +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1568 +       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
  1.1569 +       | Size of next chunk (must be in use, or we would have merged)| +-+
  1.1570 + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1571 +       |                                                               :
  1.1572 +       +- User payload                                                -+
  1.1573 +       :                                                               |
  1.1574 +       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1575 +                                                                     |0|
  1.1576 +                                                                     +-+
  1.1577 +  Note that since we always merge adjacent free chunks, the chunks
  1.1578 +  adjacent to a free chunk must be in use.
  1.1579 +
  1.1580 +  Given a pointer to a chunk (which can be derived trivially from the
  1.1581 +  payload pointer) we can, in O(1) time, find out whether the adjacent
  1.1582 +  chunks are free, and if so, unlink them from the lists that they
  1.1583 +  are on and merge them with the current chunk.
  1.1584 +
  1.1585 +  Chunks always begin on even word boundaries, so the mem portion
  1.1586 +  (which is returned to the user) is also on an even word boundary, and
  1.1587 +  thus at least double-word aligned.
  1.1588 +
  1.1589 +  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
  1.1590 +  chunk size (which is always a multiple of two words), is an in-use
  1.1591 +  bit for the *previous* chunk.  If that bit is *clear*, then the
  1.1592 +  word before the current chunk size contains the previous chunk
  1.1593 +  size, and can be used to find the front of the previous chunk.
  1.1594 +  The very first chunk allocated always has this bit set, preventing
  1.1595 +  access to non-existent (or non-owned) memory. If pinuse is set for
  1.1596 +  any given chunk, then you CANNOT determine the size of the
  1.1597 +  previous chunk, and might even get a memory addressing fault when
  1.1598 +  trying to do so.
  1.1599 +
  1.1600 +  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
  1.1601 +  the chunk size redundantly records whether the current chunk is
  1.1602 +  inuse. This redundancy enables usage checks within free and realloc,
  1.1603 +  and reduces indirection when freeing and consolidating chunks.
  1.1604 +
  1.1605 +  Each freshly allocated chunk must have both cinuse and pinuse set.
  1.1606 +  That is, each allocated chunk borders either a previously allocated
  1.1607 +  and still in-use chunk, or the base of its memory arena. This is
  1.1608 +  ensured by making all allocations from the `lowest' part of any
  1.1609 +  found chunk.  Further, no free chunk physically borders another one,
  1.1610 +  so each free chunk is known to be preceded and followed by either
  1.1611 +  inuse chunks or the ends of memory.
  1.1612 +
  1.1613 +  Note that the `foot' of the current chunk is actually represented
  1.1614 +  as the prev_foot of the NEXT chunk. This makes it easier to
  1.1615 +  deal with alignments etc but can be very confusing when trying
  1.1616 +  to extend or adapt this code.
  1.1617 +
  1.1618 +  The exceptions to all this are
  1.1619 +
  1.1620 +     1. The special chunk `top' is the top-most available chunk (i.e.,
  1.1621 +        the one bordering the end of available memory). It is treated
  1.1622 +        specially.  Top is never included in any bin, is used only if
  1.1623 +        no other chunk is available, and is released back to the
  1.1624 +        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
  1.1625 +        the top chunk is treated as larger (and thus less well
  1.1626 +        fitting) than any other available chunk.  The top chunk
  1.1627 +        doesn't update its trailing size field since there is no next
  1.1628 +        contiguous chunk that would have to index off it. However,
  1.1629 +        space is still allocated for it (TOP_FOOT_SIZE) to enable
  1.1630 +        separation or merging when space is extended.
  1.1631 +
  1.1632 +     2. Chunks allocated via mmap, which have the lowest-order bit
  1.1633 +        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
  1.1634 +        PINUSE_BIT in their head fields.  Because they are allocated
  1.1635 +        one-by-one, each must carry its own prev_foot field, which is
  1.1636 +        also used to hold the offset this chunk has within its mmapped
  1.1637 +        region, which is needed to preserve alignment. Each mmapped
  1.1638 +        chunk is trailed by the first two fields of a fake next-chunk
  1.1639 +        for sake of usage checks.
  1.1640 +
  1.1641 +*/
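         +
         +/*
         +  Worked example of the head encoding (illustrative, using the bit
         +  values PINUSE_BIT == 1 and CINUSE_BIT == 2 defined below): an
         +  in-use 32-byte chunk whose predecessor is also in use has
         +  head == 32 | CINUSE_BIT | PINUSE_BIT == 0x23; masking off the two
         +  low bits recovers the size 32.  A free 48-byte chunk that follows
         +  an in-use chunk has head == 48 | PINUSE_BIT == 0x31, and its size
         +  48 is duplicated in the prev_foot field of the chunk 48 bytes
         +  after it, which is what makes backward coalescing O(1).
         +*/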
  1.1642 +
  1.1643 +struct malloc_chunk {
  1.1644 +  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  1.1645 +  size_t               head;       /* Size and inuse bits. */
  1.1646 +  struct malloc_chunk* fd;         /* double links -- used only if free. */
  1.1647 +  struct malloc_chunk* bk;
  1.1648 +};
  1.1649 +
  1.1650 +typedef struct malloc_chunk  mchunk;
  1.1651 +typedef struct malloc_chunk* mchunkptr;
  1.1652 +typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
  1.1653 +typedef unsigned int bindex_t;         /* Described below */
  1.1654 +typedef unsigned int binmap_t;         /* Described below */
  1.1655 +typedef unsigned int flag_t;           /* The type of various bit flag sets */
  1.1656 +
  1.1657 +/* ------------------- Chunks sizes and alignments ----------------------- */
  1.1658 +
  1.1659 +#define MCHUNK_SIZE         (sizeof(mchunk))
  1.1660 +
  1.1661 +#if FOOTERS
  1.1662 +#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
  1.1663 +#else /* FOOTERS */
  1.1664 +#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
  1.1665 +#endif /* FOOTERS */
  1.1666 +
  1.1667 +/* MMapped chunks need a second word of overhead ... */
  1.1668 +#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
  1.1669 +/* ... and additional padding for fake next-chunk at foot */
  1.1670 +#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
  1.1671 +
  1.1672 +/* The smallest size we can malloc is an aligned minimal chunk */
  1.1673 +#define MIN_CHUNK_SIZE\
  1.1674 +  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1.1675 +
  1.1676 +/* conversion from malloc headers to user pointers, and back */
  1.1677 +#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
  1.1678 +#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
  1.1679 +/* chunk associated with aligned address A */
  1.1680 +#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
  1.1681 +
  1.1682 +/* Bounds on request (not chunk) sizes. */
  1.1683 +#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
  1.1684 +#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
  1.1685 +
  1.1686 +/* pad request bytes into a usable size */
  1.1687 +#define pad_request(req) \
  1.1688 +   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
  1.1689 +
  1.1690 +/* pad request, checking for minimum (but not maximum) */
  1.1691 +#define request2size(req) \
  1.1692 +  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
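         +
         +/*
         +  Worked example (illustrative, assuming a 32-bit size_t, 8-byte
         +  MALLOC_ALIGNMENT, and FOOTERS disabled so CHUNK_OVERHEAD == 4):
         +  MCHUNK_SIZE == 16, so MIN_CHUNK_SIZE == 16 and MIN_REQUEST == 11.
         +  Then
         +
         +    request2size(1)  == MIN_CHUNK_SIZE == 16
         +    request2size(12) == (12 + 4 + 7) & ~7 == 16
         +    request2size(13) == (13 + 4 + 7) & ~7 == 24
         +
         +  so a 13-byte request consumes a 24-byte chunk, and the user pointer
         +  returned by chunk2mem is 8 bytes (TWO_SIZE_T_SIZES) past the chunk
         +  base.
         +*/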
  1.1693 +
  1.1694 +
  1.1695 +/* ------------------ Operations on head and foot fields ----------------- */
  1.1696 +
  1.1697 +/*
  1.1698 +  The head field of a chunk is or'ed with PINUSE_BIT when the previous
  1.1699 +  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
  1.1700 +  in use. If the chunk was obtained with mmap, the prev_foot field has
  1.1701 +  IS_MMAPPED_BIT set, and its remaining bits hold the offset from the
  1.1702 +  base of the mmapped region to the base of the chunk.
  1.1703 +*/
  1.1704 +
  1.1705 +#define PINUSE_BIT          (SIZE_T_ONE)
  1.1706 +#define CINUSE_BIT          (SIZE_T_TWO)
  1.1707 +#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
  1.1708 +
  1.1709 +/* Head value for fenceposts */
  1.1710 +#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
  1.1711 +
  1.1712 +/* extraction of fields from head words */
  1.1713 +#define cinuse(p)           ((p)->head & CINUSE_BIT)
  1.1714 +#define pinuse(p)           ((p)->head & PINUSE_BIT)
  1.1715 +#define chunksize(p)        ((p)->head & ~(INUSE_BITS))
  1.1716 +
  1.1717 +#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
  1.1718 +#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
  1.1719 +
  1.1720 +/* Treat space at ptr +/- offset as a chunk */
  1.1721 +#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
  1.1722 +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
  1.1723 +
  1.1724 +/* Ptr to next or previous physical malloc_chunk. */
  1.1725 +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
  1.1726 +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
  1.1727 +
  1.1728 +/* extract next chunk's pinuse bit */
  1.1729 +#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
  1.1730 +
  1.1731 +/* Get/set size at footer */
  1.1732 +#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
  1.1733 +#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
  1.1734 +
  1.1735 +/* Set size, pinuse bit, and foot */
  1.1736 +#define set_size_and_pinuse_of_free_chunk(p, s)\
  1.1737 +  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
  1.1738 +
  1.1739 +/* Set size, pinuse bit, foot, and clear next pinuse */
  1.1740 +#define set_free_with_pinuse(p, s, n)\
  1.1741 +  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
  1.1742 +
  1.1743 +#define is_mmapped(p)\
  1.1744 +  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
  1.1745 +
  1.1746 +/* Get the internal overhead associated with chunk p */
  1.1747 +#define overhead_for(p)\
  1.1748 + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
  1.1749 +
  1.1750 +/* Return true if malloced space is not necessarily cleared */
  1.1751 +#if MMAP_CLEARS
  1.1752 +#define calloc_must_clear(p) (!is_mmapped(p))
  1.1753 +#else /* MMAP_CLEARS */
  1.1754 +#define calloc_must_clear(p) (1)
  1.1755 +#endif /* MMAP_CLEARS */
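         +
         +/*
         +  A sketch of how these macros combine (illustrative only; this is
         +  roughly how the usable size of an allocation can be recovered):
         +
         +    mchunkptr p = mem2chunk(mem);          where mem came from malloc
         +    size_t usable = cinuse(p) ? chunksize(p) - overhead_for(p) : 0;
         +
         +  Similarly, next_chunk(p) is always valid for an in-use chunk, while
         +  prev_chunk(p) may be consulted only when pinuse(p) is clear, because
         +  only then does p->prev_foot hold the previous chunk's size.
         +*/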
  1.1756 +
  1.1757 +/* ---------------------- Overlaid data structures ----------------------- */
  1.1758 +
  1.1759 +/*
  1.1760 +  When chunks are not in use, they are treated as nodes of either
  1.1761 +  lists or trees.
  1.1762 +
  1.1763 +  "Small"  chunks are stored in circular doubly-linked lists, and look
  1.1764 +  like this:
  1.1765 +
  1.1766 +    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1767 +            |             Size of previous chunk                            |
  1.1768 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1769 +    `head:' |             Size of chunk, in bytes                         |P|
  1.1770 +      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1771 +            |             Forward pointer to next chunk in list             |
  1.1772 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1773 +            |             Back pointer to previous chunk in list            |
  1.1774 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1775 +            |             Unused space (may be 0 bytes long)                .
  1.1776 +            .                                                               .
  1.1777 +            .                                                               |
  1.1778 +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1779 +    `foot:' |             Size of chunk, in bytes                           |
  1.1780 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1781 +
  1.1782 +  Larger chunks are kept in a form of bitwise digital trees (aka
  1.1783 +  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
  1.1784 +  free chunks greater than 256 bytes, their size doesn't impose any
  1.1785 +  constraints on user chunk sizes.  Each node looks like:
  1.1786 +
  1.1787 +    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1788 +            |             Size of previous chunk                            |
  1.1789 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1790 +    `head:' |             Size of chunk, in bytes                         |P|
  1.1791 +      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1792 +            |             Forward pointer to next chunk of same size        |
  1.1793 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1794 +            |             Back pointer to previous chunk of same size       |
  1.1795 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1796 +            |             Pointer to left child (child[0])                  |
  1.1797 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1798 +            |             Pointer to right child (child[1])                 |
  1.1799 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1800 +            |             Pointer to parent                                 |
  1.1801 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1802 +            |             bin index of this chunk                           |
  1.1803 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1804 +            |             Unused space                                      .
  1.1805 +            .                                                               |
  1.1806 +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1807 +    `foot:' |             Size of chunk, in bytes                           |
  1.1808 +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  1.1809 +
  1.1810 +  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
  1.1811 +  of the same size are arranged in a circularly-linked list, with only
  1.1812 +  the oldest chunk (the next to be used, in our FIFO ordering)
  1.1813 +  actually in the tree.  (Tree members are distinguished by a non-null
  1.1814 +  parent pointer.)  If a chunk with the same size as an existing node
  1.1815 +  is inserted, it is linked off the existing node using pointers that
  1.1816 +  work in the same way as fd/bk pointers of small chunks.
  1.1817 +
  1.1818 +  Each tree contains a power of 2 sized range of chunk sizes (the
  1.1819 +  smallest is 0x100 <= x < 0x180), which is divided in half at each
  1.1820 +  tree level, with the chunks in the smaller half of the range (0x100
  1.1821 +  <= x < 0x140 for the top node) in the left subtree and the larger
  1.1822 +  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
  1.1823 +  done by inspecting individual bits.
  1.1824 +
  1.1825 +  Using these rules, each node's left subtree contains all smaller
  1.1826 +  sizes than its right subtree.  However, the node at the root of each
  1.1827 +  subtree has no particular ordering relationship to either.  (The
  1.1828 +  dividing line between the subtree sizes is based on trie relation.)
  1.1829 +  If we remove the last chunk of a given size from the interior of the
  1.1830 +  tree, we need to replace it with a leaf node.  The tree ordering
  1.1831 +  rules permit a node to be replaced by any leaf below it.
  1.1832 +
  1.1833 +  The smallest chunk in a tree (a common operation in a best-fit
  1.1834 +  allocator) can be found by walking a path to the leftmost leaf in
  1.1835 +  the tree.  Unlike a usual binary tree, where we follow left child
  1.1836 +  pointers until we reach a null, here we follow the right child
  1.1837 +  pointer any time the left one is null, until we reach a leaf with
  1.1838 +  both child pointers null. The smallest chunk in the tree will be
  1.1839 +  somewhere along that path.
  1.1840 +
  1.1841 +  The worst case number of steps to add, find, or remove a node is
  1.1842 +  bounded by the number of bits differentiating chunks within
  1.1843 +  bins. Under current bin calculations, this ranges from 6 up to 21
  1.1844 +  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  1.1845 +  is of course much better.
  1.1846 +*/
  1.1847 +
  1.1848 +struct malloc_tree_chunk {
  1.1849 +  /* The first four fields must be compatible with malloc_chunk */
  1.1850 +  size_t                    prev_foot;
  1.1851 +  size_t                    head;
  1.1852 +  struct malloc_tree_chunk* fd;
  1.1853 +  struct malloc_tree_chunk* bk;
  1.1854 +
  1.1855 +  struct malloc_tree_chunk* child[2];
  1.1856 +  struct malloc_tree_chunk* parent;
  1.1857 +  bindex_t                  index;
  1.1858 +};
  1.1859 +
  1.1860 +typedef struct malloc_tree_chunk  tchunk;
  1.1861 +typedef struct malloc_tree_chunk* tchunkptr;
  1.1862 +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
  1.1863 +
  1.1864 +/* A little helper macro for trees */
  1.1865 +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
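         +
         +/*
         +  A sketch of the "leftmost path" walk described above (illustrative
         +  only; the helper name is made up).  Given the non-null root t of a
         +  treebin, the smallest chunk lies somewhere on the path taken by
         +  leftmost_child, so it suffices to track the minimum along the way:
         +
         +    tchunkptr smallest_in_tree(tchunkptr t) {
         +      tchunkptr best = t;
         +      while ((t = leftmost_child(t)) != 0) {
         +        if (chunksize(t) < chunksize(best))
         +          best = t;
         +      }
         +      return best;
         +    }
         +*/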
  1.1866 +
  1.1867 +/* ----------------------------- Segments -------------------------------- */
  1.1868 +
  1.1869 +/*
  1.1870 +  Each malloc space may include non-contiguous segments, held in a
  1.1871 +  list headed by an embedded malloc_segment record representing the
  1.1872 +  top-most space. Segments also include flags holding properties of
  1.1873 +  the space. Large chunks that are directly allocated by mmap are not
  1.1874 +  included in this list. They are instead independently created and
  1.1875 +  destroyed without otherwise keeping track of them.
  1.1876 +
  1.1877 +  Segment management mainly comes into play for spaces allocated by
  1.1878 +  MMAP.  Any call to MMAP might or might not return memory that is
  1.1879 +  adjacent to an existing segment.  MORECORE normally contiguously
  1.1880 +  extends the current space, so this space is almost always adjacent,
  1.1881 +  which is simpler and faster to deal with. (This is why MORECORE is
  1.1882 +  used preferentially to MMAP when both are available -- see
  1.1883 +  sys_alloc.)  When allocating using MMAP, we don't use any of the
  1.1884 +  hinting mechanisms (inconsistently) supported in various
  1.1885 +  implementations of unix mmap, or distinguish reserving from
  1.1886 +  committing memory. Instead, we just ask for space, and exploit
  1.1887 +  contiguity when we get it.  It is probably possible to do
  1.1888 +  better than this on some systems, but no general scheme seems
  1.1889 +  to be significantly better.
  1.1890 +
  1.1891 +  Management entails a simpler variant of the consolidation scheme
  1.1892 +  used for chunks to reduce fragmentation -- new adjacent memory is
  1.1893 +  normally prepended or appended to an existing segment. However,
  1.1894 +  there are limitations compared to chunk consolidation that mostly
  1.1895 +  reflect the fact that segment processing is relatively infrequent
  1.1896 +  (occurring only when getting memory from system) and that we
  1.1897 +  don't expect to have huge numbers of segments:
  1.1898 +
  1.1899 +  * Segments are not indexed, so traversal requires linear scans.  (It
  1.1900 +    would be possible to index these, but is not worth the extra
  1.1901 +    overhead and complexity for most programs on most platforms.)
  1.1902 +  * New segments are only appended to old ones when holding top-most
  1.1903 +    memory; if they cannot be prepended to others, they are held in
  1.1904 +    different segments.
  1.1905 +
  1.1906 +  Except for the top-most segment of an mstate, each segment record
  1.1907 +  is kept at the tail of its segment. Segments are added by pushing
  1.1908 +  segment records onto the list headed by &mstate.seg for the
  1.1909 +  containing mstate.
  1.1910 +
  1.1911 +  Segment flags control allocation/merge/deallocation policies:
  1.1912 +  * If EXTERN_BIT set, then we did not allocate this segment,
  1.1913 +    and so should not try to deallocate or merge with others.
  1.1914 +    (This currently holds only for the initial segment passed
  1.1915 +    into create_mspace_with_base.)
  1.1916 +  * If IS_MMAPPED_BIT set, the segment may be merged with
  1.1917 +    other surrounding mmapped segments and trimmed/de-allocated
  1.1918 +    using munmap.
  1.1919 +  * If neither bit is set, then the segment was obtained using
  1.1920 +    MORECORE so can be merged with surrounding MORECORE'd segments
  1.1921 +    and deallocated/trimmed using MORECORE with negative arguments.
  1.1922 +*/
  1.1923 +
  1.1924 +struct malloc_segment {
  1.1925 +  char*        base;             /* base address */
  1.1926 +  size_t       size;             /* allocated size */
  1.1927 +  struct malloc_segment* next;   /* ptr to next segment */
  1.1928 +  flag_t       sflags;           /* mmap and extern flag */
  1.1929 +};
  1.1930 +
  1.1931 +#define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
  1.1932 +#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
  1.1933 +
  1.1934 +typedef struct malloc_segment  msegment;
  1.1935 +typedef struct malloc_segment* msegmentptr;
  1.1936 +
  1.1937 +/* ---------------------------- malloc_state ----------------------------- */
  1.1938 +
  1.1939 +/*
  1.1940 +   A malloc_state holds all of the bookkeeping for a space.
  1.1941 +   The main fields are:
  1.1942 +
  1.1943 +  Top
  1.1944 +    The topmost chunk of the currently active segment. Its size is
  1.1945 +    cached in topsize.  The actual size of topmost space is
  1.1946 +    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
  1.1947 +    fenceposts and segment records if necessary when getting more
  1.1948 +    space from the system.  The size at which to autotrim top is
  1.1949 +    cached from mparams in trim_check, except that it is disabled if
  1.1950 +    an autotrim fails.
  1.1951 +
  1.1952 +  Designated victim (dv)
  1.1953 +    This is the preferred chunk for servicing small requests that
  1.1954 +    don't have exact fits.  It is normally the chunk split off most
  1.1955 +    recently to service another small request.  Its size is cached in
  1.1956 +    dvsize. The link fields of this chunk are not maintained since it
  1.1957 +    is not kept in a bin.
  1.1958 +
  1.1959 +  SmallBins
  1.1960 +    An array of bin headers for free chunks.  These bins hold chunks
  1.1961 +    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
  1.1962 +    chunks of all the same size, spaced 8 bytes apart.  To simplify
  1.1963 +    use in double-linked lists, each bin header acts as a malloc_chunk
  1.1964 +    pointing to the real first node, if it exists (else pointing to
  1.1965 +    itself).  This avoids special-casing for headers.  But to avoid
  1.1966 +    waste, we allocate only the fd/bk pointers of bins, and then use
  1.1967 +    repositioning tricks to treat these as the fields of a chunk.
  1.1968 +
  1.1969 +  TreeBins
  1.1970 +    Treebins are pointers to the roots of trees holding a range of
  1.1971 +    sizes. There are 2 equally spaced treebins for each power of two
  1.1972 +    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
  1.1973 +    larger.
  1.1974 +
  1.1975 +  Bin maps
  1.1976 +    There is one bit map for small bins ("smallmap") and one for
  1.1977 +    treebins ("treemap").  Each bin sets its bit when non-empty, and
  1.1978 +    clears the bit when empty.  Bit operations are then used to avoid
  1.1979 +    bin-by-bin searching -- nearly all "search" is done without ever
  1.1980 +    looking at bins that won't be selected.  The bit maps
  1.1981 +    conservatively use 32 bits per map word, even on 64-bit systems.
  1.1982 +    For a good description of some of the bit-based techniques used
  1.1983 +    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
  1.1984 +    supplement at http://hackersdelight.org/). Many of these are
  1.1985 +    intended to reduce the branchiness of paths through malloc etc, as
  1.1986 +    well as to reduce the number of memory locations read or written.
  1.1987 +
  1.1988 +  Segments
  1.1989 +    A list of segments headed by an embedded malloc_segment record
  1.1990 +    representing the initial space.
  1.1991 +
  1.1992 +  Address check support
  1.1993 +    The least_addr field is the least address ever obtained from
  1.1994 +    MORECORE or MMAP. Attempted frees and reallocs of any address less
  1.1995 +    than this are trapped (unless INSECURE is defined).
  1.1996 +
  1.1997 +  Magic tag
  1.1998 +    A cross-check field that should always hold same value as mparams.magic.
  1.1999 +
  1.2000 +  Flags
  1.2001 +    Bits recording whether to use MMAP, locks, or contiguous MORECORE
  1.2002 +
  1.2003 +  Statistics
  1.2004 +    Each space keeps track of current and maximum system memory
  1.2005 +    obtained via MORECORE or MMAP.
  1.2006 +
  1.2007 +  Locking
  1.2008 +    If USE_LOCKS is defined, the "mutex" lock is acquired and released
  1.2009 +    around every public call using this mspace.
  1.2010 +*/
  1.2011 +
  1.2012 +/* Bin types, widths and sizes */
  1.2013 +#define NSMALLBINS        (32U)
  1.2014 +#define NTREEBINS         (32U)
  1.2015 +#define SMALLBIN_SHIFT    (3U)
  1.2016 +#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
  1.2017 +#define TREEBIN_SHIFT     (8U)
  1.2018 +#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
  1.2019 +#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
  1.2020 +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
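         +
         +/*
         +  Worked numbers (illustrative, assuming a 32-bit size_t and 8-byte
         +  alignment): SMALLBIN_WIDTH == 8, MIN_LARGE_SIZE == 256, and
         +  MAX_SMALL_SIZE == 255, so smallbins hold chunk sizes 16, 24, ...,
         +  248 and anything of 256 bytes or more goes to the treebins.  With
         +  FOOTERS disabled (CHUNK_OVERHEAD == 4), MAX_SMALL_REQUEST is
         +  255 - 7 - 4 == 244: a 244-byte request pads to a 248-byte chunk,
         +  while a 245-byte request pads to 256 and is treated as large.
         +*/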
  1.2021 +
  1.2022 +struct malloc_state {
  1.2023 +  binmap_t   smallmap;
  1.2024 +  binmap_t   treemap;
  1.2025 +  size_t     dvsize;
  1.2026 +  size_t     topsize;
  1.2027 +  char*      least_addr;
  1.2028 +  mchunkptr  dv;
  1.2029 +  mchunkptr  top;
  1.2030 +  size_t     trim_check;
  1.2031 +  size_t     magic;
  1.2032 +  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  1.2033 +  tbinptr    treebins[NTREEBINS];
  1.2034 +  size_t     footprint;
  1.2035 +  size_t     max_footprint;
  1.2036 +  flag_t     mflags;
  1.2037 +#if USE_LOCKS
  1.2038 +  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
  1.2039 +#endif /* USE_LOCKS */
  1.2040 +  msegment   seg;
  1.2041 +};
  1.2042 +
  1.2043 +typedef struct malloc_state*    mstate;
  1.2044 +
  1.2045 +/* ------------- Global malloc_state and malloc_params ------------------- */
  1.2046 +
  1.2047 +/*
  1.2048 +  malloc_params holds global properties, including those that can be
  1.2049 +  dynamically set using mallopt. There is a single instance, mparams,
  1.2050 +  initialized in init_mparams.
  1.2051 +*/
  1.2052 +
  1.2053 +struct malloc_params {
  1.2054 +  size_t magic;
  1.2055 +  size_t page_size;
  1.2056 +  size_t granularity;
  1.2057 +  size_t mmap_threshold;
  1.2058 +  size_t trim_threshold;
  1.2059 +  flag_t default_mflags;
  1.2060 +};
  1.2061 +
  1.2062 +static struct malloc_params mparams;
  1.2063 +
  1.2064 +/* The global malloc_state used for all non-"mspace" calls */
  1.2065 +static struct malloc_state _gm_;
  1.2066 +#define gm                 (&_gm_)
  1.2067 +#define is_global(M)       ((M) == &_gm_)
  1.2068 +#define is_initialized(M)  ((M)->top != 0)
  1.2069 +
  1.2070 +/* -------------------------- system alloc setup ------------------------- */
  1.2071 +
  1.2072 +/* Operations on mflags */
  1.2073 +
  1.2074 +#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
  1.2075 +#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
  1.2076 +#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
  1.2077 +
  1.2078 +#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
  1.2079 +#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
  1.2080 +#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
  1.2081 +
  1.2082 +#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
  1.2083 +#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
  1.2084 +
  1.2085 +#define set_lock(M,L)\
  1.2086 + ((M)->mflags = (L)?\
  1.2087 +  ((M)->mflags | USE_LOCK_BIT) :\
  1.2088 +  ((M)->mflags & ~USE_LOCK_BIT))
  1.2089 +
  1.2090 +/* page-align a size */
  1.2091 +#define page_align(S)\
  1.2092 + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
  1.2093 +
  1.2094 +/* granularity-align a size */
  1.2095 +#define granularity_align(S)\
  1.2096 +  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
  1.2097 +
  1.2098 +#define is_page_aligned(S)\
  1.2099 +   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
  1.2100 +#define is_granularity_aligned(S)\
  1.2101 +   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
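         +
         +/*
         +  Worked example (illustrative, assuming mparams.page_size == 4096):
         +
         +    page_align(1)    == (1 + 4096) & ~4095 == 4096
         +    page_align(4096) == (4096 + 4096) & ~4095 == 8192
         +
         +  i.e. sizes are rounded up to a page boundary, and as written the
         +  macro adds a full extra page when the size is already aligned.
         +  is_page_aligned(8192) is true, since 8192 & 4095 == 0.
         +*/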
  1.2102 +
  1.2103 +/*  True if segment S holds address A */
  1.2104 +#define segment_holds(S, A)\
  1.2105 +  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
  1.2106 +
  1.2107 +/* Return segment holding given address */
  1.2108 +static msegmentptr segment_holding(mstate m, char* addr) {
  1.2109 +  msegmentptr sp = &m->seg;
  1.2110 +  for (;;) {
  1.2111 +    if (addr >= sp->base && addr < sp->base + sp->size)
  1.2112 +      return sp;
  1.2113 +    if ((sp = sp->next) == 0)
  1.2114 +      return 0;
  1.2115 +  }
  1.2116 +}
  1.2117 +
  1.2118 +/* Return true if segment contains a segment link */
  1.2119 +static int has_segment_link(mstate m, msegmentptr ss) {
  1.2120 +  msegmentptr sp = &m->seg;
  1.2121 +  for (;;) {
  1.2122 +    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
  1.2123 +      return 1;
  1.2124 +    if ((sp = sp->next) == 0)
  1.2125 +      return 0;
  1.2126 +  }
  1.2127 +}
  1.2128 +
  1.2129 +#ifndef MORECORE_CANNOT_TRIM
  1.2130 +#define should_trim(M,s)  ((s) > (M)->trim_check)
  1.2131 +#else  /* MORECORE_CANNOT_TRIM */
  1.2132 +#define should_trim(M,s)  (0)
  1.2133 +#endif /* MORECORE_CANNOT_TRIM */
  1.2134 +
  1.2135 +/*
  1.2136 +  TOP_FOOT_SIZE is padding at the end of a segment, including space
  1.2137 +  that may be needed to place segment records and fenceposts when new
  1.2138 +  noncontiguous segments are added.
  1.2139 +*/
  1.2140 +#define TOP_FOOT_SIZE\
  1.2141 +  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
  1.2142 +
  1.2143 +
  1.2144 +/* -------------------------------  Hooks -------------------------------- */
  1.2145 +
  1.2146 +/*
  1.2147 +  PREACTION should be defined to return 0 on success, and nonzero on
  1.2148 +  failure. If you are not using locking, you can redefine these to do
  1.2149 +  anything you like.
  1.2150 +*/
  1.2151 +
  1.2152 +#if USE_LOCKS
  1.2153 +
  1.2154 +/* Ensure locks are initialized */
  1.2155 +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
  1.2156 +
  1.2157 +#define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
  1.2158 +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
  1.2159 +#else /* USE_LOCKS */
  1.2160 +
  1.2161 +#ifndef PREACTION
  1.2162 +#define PREACTION(M) (0)
  1.2163 +#endif  /* PREACTION */
  1.2164 +
  1.2165 +#ifndef POSTACTION
  1.2166 +#define POSTACTION(M)
  1.2167 +#endif  /* POSTACTION */
  1.2168 +
  1.2169 +#endif /* USE_LOCKS */
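         +
         +/*
         +  A hypothetical sketch of redefining these hooks when locking is not
         +  in use, e.g. to trace entry and exit of the public routines.
         +  my_trace_enter and my_trace_exit are made-up user functions, and the
         +  macros would have to be defined before the defaults above take
         +  effect.  PREACTION must evaluate to 0 on success, hence the comma
         +  expression:
         +
         +    #define PREACTION(M)   (my_trace_enter(M), 0)
         +    #define POSTACTION(M)  my_trace_exit(M)
         +*/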
  1.2170 +
  1.2171 +/*
  1.2172 +  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  1.2173 +  USAGE_ERROR_ACTION is triggered on detected bad frees and
  1.2174 +  reallocs. The argument p is an address that might have triggered the
  1.2175 +  fault. It is ignored by the two predefined actions, but might be
  1.2176 +  useful in custom actions that try to help diagnose errors.
  1.2177 +*/
  1.2178 +
  1.2179 +#if PROCEED_ON_ERROR
  1.2180 +
  1.2181 +/* A count of the number of corruption errors causing resets */
  1.2182 +int malloc_corruption_error_count;
  1.2183 +
  1.2184 +/* default corruption action */
  1.2185 +static void reset_on_error(mstate m);
  1.2186 +
  1.2187 +#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
  1.2188 +#define USAGE_ERROR_ACTION(m, p)
  1.2189 +
  1.2190 +#else /* PROCEED_ON_ERROR */
  1.2191 +
  1.2192 +#ifndef CORRUPTION_ERROR_ACTION
  1.2193 +#define CORRUPTION_ERROR_ACTION(m) ABORT
  1.2194 +#endif /* CORRUPTION_ERROR_ACTION */
  1.2195 +
  1.2196 +#ifndef USAGE_ERROR_ACTION
  1.2197 +#define USAGE_ERROR_ACTION(m,p) ABORT
  1.2198 +#endif /* USAGE_ERROR_ACTION */
  1.2199 +
  1.2200 +#endif /* PROCEED_ON_ERROR */
  1.2201 +
  1.2202 +/* -------------------------- Debugging setup ---------------------------- */
  1.2203 +
  1.2204 +#if ! DEBUG
  1.2205 +
  1.2206 +#define check_free_chunk(M,P)
  1.2207 +#define check_inuse_chunk(M,P)
  1.2208 +#define check_malloced_chunk(M,P,N)
  1.2209 +#define check_mmapped_chunk(M,P)
  1.2210 +#define check_malloc_state(M)
  1.2211 +#define check_top_chunk(M,P)
  1.2212 +
  1.2213 +#else /* DEBUG */
  1.2214 +#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
  1.2215 +#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
  1.2216 +#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
  1.2217 +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
  1.2218 +#define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
  1.2219 +#define check_malloc_state(M)       do_check_malloc_state(M)
  1.2220 +
  1.2221 +static void   do_check_any_chunk(mstate m, mchunkptr p);
  1.2222 +static void   do_check_top_chunk(mstate m, mchunkptr p);
  1.2223 +static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
  1.2224 +static void   do_check_inuse_chunk(mstate m, mchunkptr p);
  1.2225 +static void   do_check_free_chunk(mstate m, mchunkptr p);
  1.2226 +static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
  1.2227 +static void   do_check_tree(mstate m, tchunkptr t);
  1.2228 +static void   do_check_treebin(mstate m, bindex_t i);
  1.2229 +static void   do_check_smallbin(mstate m, bindex_t i);
  1.2230 +static void   do_check_malloc_state(mstate m);
  1.2231 +static int    bin_find(mstate m, mchunkptr x);
  1.2232 +static size_t traverse_and_check(mstate m);
  1.2233 +#endif /* DEBUG */
  1.2234 +
  1.2235 +/* ---------------------------- Indexing Bins ---------------------------- */
  1.2236 +
  1.2237 +#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
  1.2238 +#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
  1.2239 +#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
  1.2240 +#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
  1.2241 +
  1.2242 +/* addressing by index. See above about smallbin repositioning */
  1.2243 +#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
  1.2244 +#define treebin_at(M,i)     (&((M)->treebins[i]))
  1.2245 +
  1.2246 +/* assign tree index for size S to variable I */
  1.2247 +#if defined(__GNUC__) && defined(i386)
  1.2248 +#define compute_tree_index(S, I)\
  1.2249 +{\
  1.2250 +  size_t X = S >> TREEBIN_SHIFT;\
  1.2251 +  if (X == 0)\
  1.2252 +    I = 0;\
  1.2253 +  else if (X > 0xFFFF)\
  1.2254 +    I = NTREEBINS-1;\
  1.2255 +  else {\
  1.2256 +    unsigned int K;\
  1.2257 +    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
  1.2258 +    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  1.2259 +  }\
  1.2260 +}
  1.2261 +#else /* GNUC */
  1.2262 +#define compute_tree_index(S, I)\
  1.2263 +{\
  1.2264 +  size_t X = S >> TREEBIN_SHIFT;\
  1.2265 +  if (X == 0)\
  1.2266 +    I = 0;\
  1.2267 +  else if (X > 0xFFFF)\
  1.2268 +    I = NTREEBINS-1;\
  1.2269 +  else {\
  1.2270 +    unsigned int Y = (unsigned int)X;\
  1.2271 +    unsigned int N = ((Y - 0x100) >> 16) & 8;\
  1.2272 +    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
  1.2273 +    N += K;\
  1.2274 +    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
  1.2275 +    K = 14 - N + ((Y <<= K) >> 15);\
  1.2276 +    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  1.2277 +  }\
  1.2278 +}
  1.2279 +#endif /* GNUC */
  1.2280 +
  1.2281 +/* Bit representing maximum resolved size in a treebin at i */
  1.2282 +#define bit_for_tree_index(i) \
  1.2283 +   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
  1.2284 +
  1.2285 +/* Shift placing maximum resolved bit in a treebin at i as sign bit */
  1.2286 +#define leftshift_for_tree_index(i) \
  1.2287 +   ((i == NTREEBINS-1)? 0 : \
  1.2288 +    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
  1.2289 +
  1.2290 +/* The size of the smallest chunk held in bin with index i */
  1.2291 +#define minsize_for_tree_index(i) \
  1.2292 +   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
  1.2293 +   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
  1.2294 +
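/* Worked example (illustration only, assuming the default TREEBIN_SHIFT of 8):
   for a chunk size S = 0x300 (768), X = S >> 8 = 3 and the highest set bit of
   X is bit 1, so compute_tree_index gives I = (1 << 1) + ((S >> 8) & 1) = 3.
   Consistently, minsize_for_tree_index(3) = (1 << 9) | (1 << 8) = 768 and
   minsize_for_tree_index(4) = 1024, so treebin 3 holds sizes in [768, 1024). */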
  1.2295 +
  1.2296 +/* ------------------------ Operations on bin maps ----------------------- */
  1.2297 +
  1.2298 +/* bit corresponding to given index */
  1.2299 +#define idx2bit(i)              ((binmap_t)(1) << (i))
  1.2300 +
  1.2301 +/* Mark/Clear bits with given index */
  1.2302 +#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
  1.2303 +#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
  1.2304 +#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
  1.2305 +
  1.2306 +#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
  1.2307 +#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
  1.2308 +#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
  1.2309 +
  1.2310 +/* index corresponding to given bit */
  1.2311 +
  1.2312 +#if defined(__GNUC__) && defined(i386)
  1.2313 +#define compute_bit2idx(X, I)\
  1.2314 +{\
  1.2315 +  unsigned int J;\
  1.2316 +  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  1.2317 +  I = (bindex_t)J;\
  1.2318 +}
  1.2319 +
  1.2320 +#else /* GNUC */
  1.2321 +#if  USE_BUILTIN_FFS
  1.2322 +#define compute_bit2idx(X, I) I = ffs(X)-1
  1.2323 +
  1.2324 +#else /* USE_BUILTIN_FFS */
  1.2325 +#define compute_bit2idx(X, I)\
  1.2326 +{\
  1.2327 +  unsigned int Y = X - 1;\
  1.2328 +  unsigned int K = Y >> (16-4) & 16;\
  1.2329 +  unsigned int N = K;        Y >>= K;\
  1.2330 +  N += K = Y >> (8-3) &  8;  Y >>= K;\
  1.2331 +  N += K = Y >> (4-2) &  4;  Y >>= K;\
  1.2332 +  N += K = Y >> (2-1) &  2;  Y >>= K;\
  1.2333 +  N += K = Y >> (1-0) &  1;  Y >>= K;\
  1.2334 +  I = (bindex_t)(N + Y);\
  1.2335 +}
  1.2336 +#endif /* USE_BUILTIN_FFS */
  1.2337 +#endif /* GNUC */
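/* Trace of the portable fallback above (illustration only): for a map with a
   single bit set, X = 0x20, Y = X-1 = 0x1F; the staged shifts contribute
   0 + 0 + 4 + 0 + 0 to N and leave Y == 1, so I = N + Y = 5, the index of
   the set bit. */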
  1.2338 +
  1.2339 +/* isolate the least set bit of a bitmap */
  1.2340 +#define least_bit(x)         ((x) & -(x))
  1.2341 +
  1.2342 +/* mask with all bits to left of least bit of x on */
  1.2343 +#define left_bits(x)         ((x<<1) | -(x<<1))
  1.2344 +
  1.2345 +/* mask with all bits to left of or equal to least bit of x on */
  1.2346 +#define same_or_left_bits(x) ((x) | -(x))
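/* For example, with x = 0x28 (bits 3 and 5 set): least_bit(x) == 0x08,
   left_bits(x) turns on every bit above bit 3, and same_or_left_bits(x)
   turns on bit 3 and everything above it. */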
  1.2347 +
  1.2348 +
  1.2349 +/* ----------------------- Runtime Check Support ------------------------- */
  1.2350 +
  1.2351 +/*
  1.2352 +  For security, the main invariant is that malloc/free/etc never
  1.2353 +  writes to a static address other than malloc_state, unless static
  1.2354 +  malloc_state itself has been corrupted, which cannot occur via
  1.2355 +  malloc (because of these checks). In essence this means that we
  1.2356 +  believe all pointers, sizes, maps etc held in malloc_state, but
  1.2357 +  check all of those linked or offsetted from other embedded data
  1.2358 +  structures.  These checks are interspersed with main code in a way
  1.2359 +  that tends to minimize their run-time cost.
  1.2360 +
  1.2361 +  When FOOTERS is defined, in addition to range checking, we also
  1.2362 +  verify footer fields of inuse chunks, which can be used to guarantee
  1.2363 +  that the mstate controlling malloc/free is intact.  This is a
  1.2364 +  streamlined version of the approach described by William Robertson
  1.2365 +  et al in "Run-time Detection of Heap-based Overflows" LISA'03
  1.2366 +  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  1.2367 +  of an inuse chunk holds the xor of its mstate and a random seed,
  1.2368 +  that is checked upon calls to free() and realloc().  This is
  1.2369 +  (probablistically) unguessable from outside the program, but can be
  1.2370 +  computed by any code successfully malloc'ing any chunk, so does not
  1.2371 +  itself provide protection against code that has already broken
  1.2372 +  security through some other means.  Unlike Robertson et al, we
  1.2373 +  always dynamically check addresses of all offset chunks (previous,
  1.2374 +  next, etc). This turns out to be cheaper than relying on hashes.
  1.2375 +*/
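/* Concretely (see mark_inuse_foot/get_mstate_for below): with FOOTERS on,
   the word just past an inuse chunk p holds ((size_t)m ^ mparams.magic), so
   free()/realloc() recover the owning mstate as

     m = (mstate)(chunk_plus_offset(p, chunksize(p))->prev_foot ^ mparams.magic);

   and ok_magic(m) then rejects values whose magic field does not match. */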
  1.2376 +
  1.2377 +#if !INSECURE
  1.2378 +/* Check if address a is at least as high as any from MORECORE or MMAP */
  1.2379 +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
  1.2380 +/* Check if address of next chunk n is higher than base chunk p */
  1.2381 +#define ok_next(p, n)    ((char*)(p) < (char*)(n))
  1.2382 +/* Check if p has its cinuse bit on */
  1.2383 +#define ok_cinuse(p)     cinuse(p)
  1.2384 +/* Check if p has its pinuse bit on */
  1.2385 +#define ok_pinuse(p)     pinuse(p)
  1.2386 +
  1.2387 +#else /* !INSECURE */
  1.2388 +#define ok_address(M, a) (1)
  1.2389 +#define ok_next(b, n)    (1)
  1.2390 +#define ok_cinuse(p)     (1)
  1.2391 +#define ok_pinuse(p)     (1)
  1.2392 +#endif /* !INSECURE */
  1.2393 +
  1.2394 +#if (FOOTERS && !INSECURE)
  1.2395 +/* Check if (alleged) mstate m has expected magic field */
  1.2396 +#define ok_magic(M)      ((M)->magic == mparams.magic)
  1.2397 +#else  /* (FOOTERS && !INSECURE) */
  1.2398 +#define ok_magic(M)      (1)
  1.2399 +#endif /* (FOOTERS && !INSECURE) */
  1.2400 +
  1.2401 +
  1.2402 +/* In gcc, use __builtin_expect to minimize impact of checks */
  1.2403 +#if !INSECURE
  1.2404 +#if defined(__GNUC__) && __GNUC__ >= 3
  1.2405 +#define RTCHECK(e)  __builtin_expect(e, 1)
  1.2406 +#else /* GNUC */
  1.2407 +#define RTCHECK(e)  (e)
  1.2408 +#endif /* GNUC */
  1.2409 +#else /* !INSECURE */
  1.2410 +#define RTCHECK(e)  (1)
  1.2411 +#endif /* !INSECURE */
  1.2412 +
  1.2413 +/* macros to set up inuse chunks with or without footers */
  1.2414 +
  1.2415 +#if !FOOTERS
  1.2416 +
  1.2417 +#define mark_inuse_foot(M,p,s)
  1.2418 +
  1.2419 +/* Set cinuse bit and pinuse bit of next chunk */
  1.2420 +#define set_inuse(M,p,s)\
  1.2421 +  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  1.2422 +  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  1.2423 +
  1.2424 +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
  1.2425 +#define set_inuse_and_pinuse(M,p,s)\
  1.2426 +  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  1.2427 +  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
  1.2428 +
  1.2429 +/* Set size, cinuse and pinuse bit of this chunk */
  1.2430 +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  1.2431 +  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
  1.2432 +
  1.2433 +#else /* FOOTERS */
  1.2434 +
  1.2435 +/* Set foot of inuse chunk to be xor of mstate and seed */
  1.2436 +#define mark_inuse_foot(M,p,s)\
  1.2437 +  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
  1.2438 +
  1.2439 +#define get_mstate_for(p)\
  1.2440 +  ((mstate)(((mchunkptr)((char*)(p) +\
  1.2441 +    (chunksize(p))))->prev_foot ^ mparams.magic))
  1.2442 +
  1.2443 +#define set_inuse(M,p,s)\
  1.2444 +  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  1.2445 +  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  1.2446 +  mark_inuse_foot(M,p,s))
  1.2447 +
  1.2448 +#define set_inuse_and_pinuse(M,p,s)\
  1.2449 +  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  1.2450 +  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  1.2451 +  mark_inuse_foot(M,p,s))
  1.2452 +
  1.2453 +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  1.2454 +  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  1.2455 +  mark_inuse_foot(M, p, s))
  1.2456 +
  1.2457 +#endif /* !FOOTERS */
  1.2458 +
  1.2459 +/* ---------------------------- setting mparams -------------------------- */
  1.2460 +
  1.2461 +/* Initialize mparams */
  1.2462 +static int init_mparams(void) {
  1.2463 +  if (mparams.page_size == 0) {
  1.2464 +    size_t s;
  1.2465 +
  1.2466 +    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  1.2467 +    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  1.2468 +#if MORECORE_CONTIGUOUS
  1.2469 +    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
  1.2470 +#else  /* MORECORE_CONTIGUOUS */
  1.2471 +    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
  1.2472 +#endif /* MORECORE_CONTIGUOUS */
  1.2473 +
  1.2474 +#if (FOOTERS && !INSECURE)
  1.2475 +    {
  1.2476 +#if USE_DEV_RANDOM
  1.2477 +      int fd;
  1.2478 +      unsigned char buf[sizeof(size_t)];
  1.2479 +      /* Try to use /dev/urandom, else fall back on using time */
  1.2480 +      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
  1.2481 +          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
  1.2482 +        s = *((size_t *) buf);
  1.2483 +        close(fd);
  1.2484 +      }
  1.2485 +      else
  1.2486 +#endif /* USE_DEV_RANDOM */
  1.2487 +        s = (size_t)(time(0) ^ (size_t)0x55555555U);
  1.2488 +
  1.2489 +      s |= (size_t)8U;    /* ensure nonzero */
  1.2490 +      s &= ~(size_t)7U;   /* improve chances of fault for bad values */
  1.2491 +
  1.2492 +    }
  1.2493 +#else /* (FOOTERS && !INSECURE) */
  1.2494 +    s = (size_t)0x58585858U;
  1.2495 +#endif /* (FOOTERS && !INSECURE) */
  1.2496 +    ACQUIRE_MAGIC_INIT_LOCK();
  1.2497 +    if (mparams.magic == 0) {
  1.2498 +      mparams.magic = s;
  1.2499 +      /* Set up lock for main malloc area */
  1.2500 +      INITIAL_LOCK(&gm->mutex);
  1.2501 +      gm->mflags = mparams.default_mflags;
  1.2502 +    }
  1.2503 +    RELEASE_MAGIC_INIT_LOCK();
  1.2504 +
  1.2505 +#ifndef WIN32
  1.2506 +    mparams.page_size = malloc_getpagesize;
  1.2507 +    mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
  1.2508 +                           DEFAULT_GRANULARITY : mparams.page_size);
  1.2509 +#else /* WIN32 */
  1.2510 +    {
  1.2511 +      SYSTEM_INFO system_info;
  1.2512 +      GetSystemInfo(&system_info);
  1.2513 +      mparams.page_size = system_info.dwPageSize;
  1.2514 +      mparams.granularity = system_info.dwAllocationGranularity;
  1.2515 +    }
  1.2516 +#endif /* WIN32 */
  1.2517 +
  1.2518 +    /* Sanity-check configuration:
  1.2519 +       size_t must be unsigned and as wide as pointer type.
  1.2520 +       ints must be at least 4 bytes.
  1.2521 +       alignment must be at least 8.
  1.2522 +       Alignment, min chunk size, and page size must all be powers of 2.
  1.2523 +    */
  1.2524 +    if ((sizeof(size_t) != sizeof(char*)) ||
  1.2525 +        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
  1.2526 +        (sizeof(int) < 4)  ||
  1.2527 +        (MALLOC_ALIGNMENT < (size_t)8U) ||
  1.2528 +        ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
  1.2529 +        ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
  1.2530 +        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
  1.2531 +        ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
  1.2532 +      ABORT;
  1.2533 +  }
  1.2534 +  return 0;
  1.2535 +}
  1.2536 +
  1.2537 +/* support for mallopt */
  1.2538 +static int change_mparam(int param_number, int value) {
  1.2539 +  size_t val = (size_t)value;
  1.2540 +  init_mparams();
  1.2541 +  switch(param_number) {
  1.2542 +  case M_TRIM_THRESHOLD:
  1.2543 +    mparams.trim_threshold = val;
  1.2544 +    return 1;
  1.2545 +  case M_GRANULARITY:
  1.2546 +    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
  1.2547 +      mparams.granularity = val;
  1.2548 +      return 1;
  1.2549 +    }
  1.2550 +    else
  1.2551 +      return 0;
  1.2552 +  case M_MMAP_THRESHOLD:
  1.2553 +    mparams.mmap_threshold = val;
  1.2554 +    return 1;
  1.2555 +  default:
  1.2556 +    return 0;
  1.2557 +  }
  1.2558 +}
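/* change_mparam backs the public mallopt entry point; for example, raising
   the mmap threshold to 1MB would be a call along the lines of
   mallopt(M_MMAP_THRESHOLD, 1024*1024) (illustrative value only). */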
  1.2559 +
  1.2560 +#if DEBUG
  1.2561 +/* ------------------------- Debugging Support --------------------------- */
  1.2562 +
  1.2563 +/* Check properties of any chunk, whether free, inuse, mmapped etc  */
  1.2564 +static void do_check_any_chunk(mstate m, mchunkptr p) {
  1.2565 +  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  1.2566 +  assert(ok_address(m, p));
  1.2567 +}
  1.2568 +
  1.2569 +/* Check properties of top chunk */
  1.2570 +static void do_check_top_chunk(mstate m, mchunkptr p) {
  1.2571 +  msegmentptr sp = segment_holding(m, (char*)p);
  1.2572 +  size_t  sz = chunksize(p);
  1.2573 +  assert(sp != 0);
  1.2574 +  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  1.2575 +  assert(ok_address(m, p));
  1.2576 +  assert(sz == m->topsize);
  1.2577 +  assert(sz > 0);
  1.2578 +  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  1.2579 +  assert(pinuse(p));
  1.2580 +  assert(!next_pinuse(p));
  1.2581 +}
  1.2582 +
  1.2583 +/* Check properties of (inuse) mmapped chunks */
  1.2584 +static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  1.2585 +  size_t  sz = chunksize(p);
  1.2586 +  size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  1.2587 +  assert(is_mmapped(p));
  1.2588 +  assert(use_mmap(m));
  1.2589 +  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  1.2590 +  assert(ok_address(m, p));
  1.2591 +  assert(!is_small(sz));
  1.2592 +  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  1.2593 +  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  1.2594 +  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
  1.2595 +}
  1.2596 +
  1.2597 +/* Check properties of inuse chunks */
  1.2598 +static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  1.2599 +  do_check_any_chunk(m, p);
  1.2600 +  assert(cinuse(p));
  1.2601 +  assert(next_pinuse(p));
  1.2602 +  /* If not pinuse and not mmapped, previous chunk has OK offset */
  1.2603 +  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  1.2604 +  if (is_mmapped(p))
  1.2605 +    do_check_mmapped_chunk(m, p);
  1.2606 +}
  1.2607 +
  1.2608 +/* Check properties of free chunks */
  1.2609 +static void do_check_free_chunk(mstate m, mchunkptr p) {
  1.2610 +  size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  1.2611 +  mchunkptr next = chunk_plus_offset(p, sz);
  1.2612 +  do_check_any_chunk(m, p);
  1.2613 +  assert(!cinuse(p));
  1.2614 +  assert(!next_pinuse(p));
  1.2615 +  assert (!is_mmapped(p));
  1.2616 +  if (p != m->dv && p != m->top) {
  1.2617 +    if (sz >= MIN_CHUNK_SIZE) {
  1.2618 +      assert((sz & CHUNK_ALIGN_MASK) == 0);
  1.2619 +      assert(is_aligned(chunk2mem(p)));
  1.2620 +      assert(next->prev_foot == sz);
  1.2621 +      assert(pinuse(p));
  1.2622 +      assert (next == m->top || cinuse(next));
  1.2623 +      assert(p->fd->bk == p);
  1.2624 +      assert(p->bk->fd == p);
  1.2625 +    }
  1.2626 +    else  /* markers are always of size SIZE_T_SIZE */
  1.2627 +      assert(sz == SIZE_T_SIZE);
  1.2628 +  }
  1.2629 +}
  1.2630 +
  1.2631 +/* Check properties of malloced chunks at the point they are malloced */
  1.2632 +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  1.2633 +  if (mem != 0) {
  1.2634 +    mchunkptr p = mem2chunk(mem);
  1.2635 +    size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  1.2636 +    do_check_inuse_chunk(m, p);
  1.2637 +    assert((sz & CHUNK_ALIGN_MASK) == 0);
  1.2638 +    assert(sz >= MIN_CHUNK_SIZE);
  1.2639 +    assert(sz >= s);
  1.2640 +    /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
  1.2641 +    assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  1.2642 +  }
  1.2643 +}
  1.2644 +
  1.2645 +/* Check a tree and its subtrees.  */
  1.2646 +static void do_check_tree(mstate m, tchunkptr t) {
  1.2647 +  tchunkptr head = 0;
  1.2648 +  tchunkptr u = t;
  1.2649 +  bindex_t tindex = t->index;
  1.2650 +  size_t tsize = chunksize(t);
  1.2651 +  bindex_t idx;
  1.2652 +  compute_tree_index(tsize, idx);
  1.2653 +  assert(tindex == idx);
  1.2654 +  assert(tsize >= MIN_LARGE_SIZE);
  1.2655 +  assert(tsize >= minsize_for_tree_index(idx));
  1.2656 +  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
  1.2657 +
  1.2658 +  do { /* traverse through chain of same-sized nodes */
  1.2659 +    do_check_any_chunk(m, ((mchunkptr)u));
  1.2660 +    assert(u->index == tindex);
  1.2661 +    assert(chunksize(u) == tsize);
  1.2662 +    assert(!cinuse(u));
  1.2663 +    assert(!next_pinuse(u));
  1.2664 +    assert(u->fd->bk == u);
  1.2665 +    assert(u->bk->fd == u);
  1.2666 +    if (u->parent == 0) {
  1.2667 +      assert(u->child[0] == 0);
  1.2668 +      assert(u->child[1] == 0);
  1.2669 +    }
  1.2670 +    else {
  1.2671 +      assert(head == 0); /* only one node on chain has parent */
  1.2672 +      head = u;
  1.2673 +      assert(u->parent != u);
  1.2674 +      assert (u->parent->child[0] == u ||
  1.2675 +              u->parent->child[1] == u ||
  1.2676 +              *((tbinptr*)(u->parent)) == u);
  1.2677 +      if (u->child[0] != 0) {
  1.2678 +        assert(u->child[0]->parent == u);
  1.2679 +        assert(u->child[0] != u);
  1.2680 +        do_check_tree(m, u->child[0]);
  1.2681 +      }
  1.2682 +      if (u->child[1] != 0) {
  1.2683 +        assert(u->child[1]->parent == u);
  1.2684 +        assert(u->child[1] != u);
  1.2685 +        do_check_tree(m, u->child[1]);
  1.2686 +      }
  1.2687 +      if (u->child[0] != 0 && u->child[1] != 0) {
  1.2688 +        assert(chunksize(u->child[0]) < chunksize(u->child[1]));
  1.2689 +      }
  1.2690 +    }
  1.2691 +    u = u->fd;
  1.2692 +  } while (u != t);
  1.2693 +  assert(head != 0);
  1.2694 +}
  1.2695 +
  1.2696 +/*  Check all the chunks in a treebin.  */
  1.2697 +static void do_check_treebin(mstate m, bindex_t i) {
  1.2698 +  tbinptr* tb = treebin_at(m, i);
  1.2699 +  tchunkptr t = *tb;
  1.2700 +  int empty = (m->treemap & (1U << i)) == 0;
  1.2701 +  if (t == 0)
  1.2702 +    assert(empty);
  1.2703 +  if (!empty)
  1.2704 +    do_check_tree(m, t);
  1.2705 +}
  1.2706 +
  1.2707 +/*  Check all the chunks in a smallbin.  */
  1.2708 +static void do_check_smallbin(mstate m, bindex_t i) {
  1.2709 +  sbinptr b = smallbin_at(m, i);
  1.2710 +  mchunkptr p = b->bk;
  1.2711 +  unsigned int empty = (m->smallmap & (1U << i)) == 0;
  1.2712 +  if (p == b)
  1.2713 +    assert(empty);
  1.2714 +  if (!empty) {
  1.2715 +    for (; p != b; p = p->bk) {
  1.2716 +      size_t size = chunksize(p);
  1.2717 +      mchunkptr q;
  1.2718 +      /* each chunk claims to be free */
  1.2719 +      do_check_free_chunk(m, p);
  1.2720 +      /* chunk belongs in bin */
  1.2721 +      assert(small_index(size) == i);
  1.2722 +      assert(p->bk == b || chunksize(p->bk) == chunksize(p));
  1.2723 +      /* chunk is followed by an inuse chunk */
  1.2724 +      q = next_chunk(p);
  1.2725 +      if (q->head != FENCEPOST_HEAD)
  1.2726 +        do_check_inuse_chunk(m, q);
  1.2727 +    }
  1.2728 +  }
  1.2729 +}
  1.2730 +
  1.2731 +/* Find x in a bin. Used in other check functions. */
  1.2732 +static int bin_find(mstate m, mchunkptr x) {
  1.2733 +  size_t size = chunksize(x);
  1.2734 +  if (is_small(size)) {
  1.2735 +    bindex_t sidx = small_index(size);
  1.2736 +    sbinptr b = smallbin_at(m, sidx);
  1.2737 +    if (smallmap_is_marked(m, sidx)) {
  1.2738 +      mchunkptr p = b;
  1.2739 +      do {
  1.2740 +        if (p == x)
  1.2741 +          return 1;
  1.2742 +      } while ((p = p->fd) != b);
  1.2743 +    }
  1.2744 +  }
  1.2745 +  else {
  1.2746 +    bindex_t tidx;
  1.2747 +    compute_tree_index(size, tidx);
  1.2748 +    if (treemap_is_marked(m, tidx)) {
  1.2749 +      tchunkptr t = *treebin_at(m, tidx);
  1.2750 +      size_t sizebits = size << leftshift_for_tree_index(tidx);
  1.2751 +      while (t != 0 && chunksize(t) != size) {
  1.2752 +        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  1.2753 +        sizebits <<= 1;
  1.2754 +      }
  1.2755 +      if (t != 0) {
  1.2756 +        tchunkptr u = t;
  1.2757 +        do {
  1.2758 +          if (u == (tchunkptr)x)
  1.2759 +            return 1;
  1.2760 +        } while ((u = u->fd) != t);
  1.2761 +      }
  1.2762 +    }
  1.2763 +  }
  1.2764 +  return 0;
  1.2765 +}
  1.2766 +
  1.2767 +/* Traverse each chunk and check it; return total */
  1.2768 +static size_t traverse_and_check(mstate m) {
  1.2769 +  size_t sum = 0;
  1.2770 +  if (is_initialized(m)) {
  1.2771 +    msegmentptr s = &m->seg;
  1.2772 +    sum += m->topsize + TOP_FOOT_SIZE;
  1.2773 +    while (s != 0) {
  1.2774 +      mchunkptr q = align_as_chunk(s->base);
  1.2775 +      mchunkptr lastq = 0;
  1.2776 +      assert(pinuse(q));
  1.2777 +      while (segment_holds(s, q) &&
  1.2778 +             q != m->top && q->head != FENCEPOST_HEAD) {
  1.2779 +        sum += chunksize(q);
  1.2780 +        if (cinuse(q)) {
  1.2781 +          assert(!bin_find(m, q));
  1.2782 +          do_check_inuse_chunk(m, q);
  1.2783 +        }
  1.2784 +        else {
  1.2785 +          assert(q == m->dv || bin_find(m, q));
  1.2786 +          assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
  1.2787 +          do_check_free_chunk(m, q);
  1.2788 +        }
  1.2789 +        lastq = q;
  1.2790 +        q = next_chunk(q);
  1.2791 +      }
  1.2792 +      s = s->next;
  1.2793 +    }
  1.2794 +  }
  1.2795 +  return sum;
  1.2796 +}
  1.2797 +
  1.2798 +/* Check all properties of malloc_state. */
  1.2799 +static void do_check_malloc_state(mstate m) {
  1.2800 +  bindex_t i;
  1.2801 +  size_t total;
  1.2802 +  /* check bins */
  1.2803 +  for (i = 0; i < NSMALLBINS; ++i)
  1.2804 +    do_check_smallbin(m, i);
  1.2805 +  for (i = 0; i < NTREEBINS; ++i)
  1.2806 +    do_check_treebin(m, i);
  1.2807 +
  1.2808 +  if (m->dvsize != 0) { /* check dv chunk */
  1.2809 +    do_check_any_chunk(m, m->dv);
  1.2810 +    assert(m->dvsize == chunksize(m->dv));
  1.2811 +    assert(m->dvsize >= MIN_CHUNK_SIZE);
  1.2812 +    assert(bin_find(m, m->dv) == 0);
  1.2813 +  }
  1.2814 +
  1.2815 +  if (m->top != 0) {   /* check top chunk */
  1.2816 +    do_check_top_chunk(m, m->top);
  1.2817 +    assert(m->topsize == chunksize(m->top));
  1.2818 +    assert(m->topsize > 0);
  1.2819 +    assert(bin_find(m, m->top) == 0);
  1.2820 +  }
  1.2821 +
  1.2822 +  total = traverse_and_check(m);
  1.2823 +  assert(total <= m->footprint);
  1.2824 +  assert(m->footprint <= m->max_footprint);
  1.2825 +}
  1.2826 +#endif /* DEBUG */
  1.2827 +
  1.2828 +/* ----------------------------- statistics ------------------------------ */
  1.2829 +
  1.2830 +#if !NO_MALLINFO
  1.2831 +static struct mallinfo internal_mallinfo(mstate m) {
  1.2832 +  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  1.2833 +  if (!PREACTION(m)) {
  1.2834 +    check_malloc_state(m);
  1.2835 +    if (is_initialized(m)) {
  1.2836 +      size_t nfree = SIZE_T_ONE; /* top always free */
  1.2837 +      size_t mfree = m->topsize + TOP_FOOT_SIZE;
  1.2838 +      size_t sum = mfree;
  1.2839 +      msegmentptr s = &m->seg;
  1.2840 +      while (s != 0) {
  1.2841 +        mchunkptr q = align_as_chunk(s->base);
  1.2842 +        while (segment_holds(s, q) &&
  1.2843 +               q != m->top && q->head != FENCEPOST_HEAD) {
  1.2844 +          size_t sz = chunksize(q);
  1.2845 +          sum += sz;
  1.2846 +          if (!cinuse(q)) {
  1.2847 +            mfree += sz;
  1.2848 +            ++nfree;
  1.2849 +          }
  1.2850 +          q = next_chunk(q);
  1.2851 +        }
  1.2852 +        s = s->next;
  1.2853 +      }
  1.2854 +
  1.2855 +      nm.arena    = sum;
  1.2856 +      nm.ordblks  = nfree;
  1.2857 +      nm.hblkhd   = m->footprint - sum;
  1.2858 +      nm.usmblks  = m->max_footprint;
  1.2859 +      nm.uordblks = m->footprint - mfree;
  1.2860 +      nm.fordblks = mfree;
  1.2861 +      nm.keepcost = m->topsize;
  1.2862 +    }
  1.2863 +
  1.2864 +    POSTACTION(m);
  1.2865 +  }
  1.2866 +  return nm;
  1.2867 +}
  1.2868 +#endif /* !NO_MALLINFO */
  1.2869 +
  1.2870 +static void internal_malloc_stats(mstate m) {
  1.2871 +  if (!PREACTION(m)) {
  1.2872 +    size_t maxfp = 0;
  1.2873 +    size_t fp = 0;
  1.2874 +    size_t used = 0;
  1.2875 +    check_malloc_state(m);
  1.2876 +    if (is_initialized(m)) {
  1.2877 +      msegmentptr s = &m->seg;
  1.2878 +      maxfp = m->max_footprint;
  1.2879 +      fp = m->footprint;
  1.2880 +      used = fp - (m->topsize + TOP_FOOT_SIZE);
  1.2881 +
  1.2882 +      while (s != 0) {
  1.2883 +        mchunkptr q = align_as_chunk(s->base);
  1.2884 +        while (segment_holds(s, q) &&
  1.2885 +               q != m->top && q->head != FENCEPOST_HEAD) {
  1.2886 +          if (!cinuse(q))
  1.2887 +            used -= chunksize(q);
  1.2888 +          q = next_chunk(q);
  1.2889 +        }
  1.2890 +        s = s->next;
  1.2891 +      }
  1.2892 +    }
  1.2893 +
  1.2894 +#ifndef LACKS_STDIO_H
  1.2895 +    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
  1.2896 +    fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
  1.2897 +    fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
  1.2898 +#endif
  1.2899 +
  1.2900 +    POSTACTION(m);
  1.2901 +  }
  1.2902 +}
  1.2903 +
  1.2904 +/* ----------------------- Operations on smallbins ----------------------- */
  1.2905 +
  1.2906 +/*
  1.2907 +  Various forms of linking and unlinking are defined as macros, even
  1.2908 +  the ones for trees, which are very long but have very short typical
  1.2909 +  paths.  This is ugly but reduces reliance on inlining support of
  1.2910 +  compilers.
  1.2911 +*/
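/* Each smallbin is kept as a circular doubly-linked list whose bin header
   acts as a dummy chunk: an empty bin has bin->fd == bin->bk == bin (see
   init_bins below), and insert_small_chunk places new chunks at the front
   of the list, as the header's fd. */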
  1.2912 +
  1.2913 +/* Link a free chunk into a smallbin  */
  1.2914 +#define insert_small_chunk(M, P, S) {\
  1.2915 +  bindex_t I  = small_index(S);\
  1.2916 +  mchunkptr B = smallbin_at(M, I);\
  1.2917 +  mchunkptr F = B;\
  1.2918 +  assert(S >= MIN_CHUNK_SIZE);\
  1.2919 +  if (!smallmap_is_marked(M, I))\
  1.2920 +    mark_smallmap(M, I);\
  1.2921 +  else if (RTCHECK(ok_address(M, B->fd)))\
  1.2922 +    F = B->fd;\
  1.2923 +  else {\
  1.2924 +    CORRUPTION_ERROR_ACTION(M);\
  1.2925 +  }\
  1.2926 +  B->fd = P;\
  1.2927 +  F->bk = P;\
  1.2928 +  P->fd = F;\
  1.2929 +  P->bk = B;\
  1.2930 +}
  1.2931 +
  1.2932 +/* Unlink a chunk from a smallbin  */
  1.2933 +#define unlink_small_chunk(M, P, S) {\
  1.2934 +  mchunkptr F = P->fd;\
  1.2935 +  mchunkptr B = P->bk;\
  1.2936 +  bindex_t I = small_index(S);\
  1.2937 +  assert(P != B);\
  1.2938 +  assert(P != F);\
  1.2939 +  assert(chunksize(P) == small_index2size(I));\
  1.2940 +  if (F == B)\
  1.2941 +    clear_smallmap(M, I);\
  1.2942 +  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
  1.2943 +                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
  1.2944 +    F->bk = B;\
  1.2945 +    B->fd = F;\
  1.2946 +  }\
  1.2947 +  else {\
  1.2948 +    CORRUPTION_ERROR_ACTION(M);\
  1.2949 +  }\
  1.2950 +}
  1.2951 +
  1.2952 +/* Unlink the first chunk from a smallbin */
  1.2953 +#define unlink_first_small_chunk(M, B, P, I) {\
  1.2954 +  mchunkptr F = P->fd;\
  1.2955 +  assert(P != B);\
  1.2956 +  assert(P != F);\
  1.2957 +  assert(chunksize(P) == small_index2size(I));\
  1.2958 +  if (B == F)\
  1.2959 +    clear_smallmap(M, I);\
  1.2960 +  else if (RTCHECK(ok_address(M, F))) {\
  1.2961 +    B->fd = F;\
  1.2962 +    F->bk = B;\
  1.2963 +  }\
  1.2964 +  else {\
  1.2965 +    CORRUPTION_ERROR_ACTION(M);\
  1.2966 +  }\
  1.2967 +}
  1.2968 +
  1.2969 +/* Replace dv node, binning the old one */
  1.2970 +/* Used only when dvsize known to be small */
  1.2971 +#define replace_dv(M, P, S) {\
  1.2972 +  size_t DVS = M->dvsize;\
  1.2973 +  if (DVS != 0) {\
  1.2974 +    mchunkptr DV = M->dv;\
  1.2975 +    assert(is_small(DVS));\
  1.2976 +    insert_small_chunk(M, DV, DVS);\
  1.2977 +  }\
  1.2978 +  M->dvsize = S;\
  1.2979 +  M->dv = P;\
  1.2980 +}
  1.2981 +
  1.2982 +/* ------------------------- Operations on trees ------------------------- */
  1.2983 +
  1.2984 +/* Insert chunk into tree */
  1.2985 +#define insert_large_chunk(M, X, S) {\
  1.2986 +  tbinptr* H;\
  1.2987 +  bindex_t I;\
  1.2988 +  compute_tree_index(S, I);\
  1.2989 +  H = treebin_at(M, I);\
  1.2990 +  X->index = I;\
  1.2991 +  X->child[0] = X->child[1] = 0;\
  1.2992 +  if (!treemap_is_marked(M, I)) {\
  1.2993 +    mark_treemap(M, I);\
  1.2994 +    *H = X;\
  1.2995 +    X->parent = (tchunkptr)H;\
  1.2996 +    X->fd = X->bk = X;\
  1.2997 +  }\
  1.2998 +  else {\
  1.2999 +    tchunkptr T = *H;\
  1.3000 +    size_t K = S << leftshift_for_tree_index(I);\
  1.3001 +    for (;;) {\
  1.3002 +      if (chunksize(T) != S) {\
  1.3003 +        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
  1.3004 +        K <<= 1;\
  1.3005 +        if (*C != 0)\
  1.3006 +          T = *C;\
  1.3007 +        else if (RTCHECK(ok_address(M, C))) {\
  1.3008 +          *C = X;\
  1.3009 +          X->parent = T;\
  1.3010 +          X->fd = X->bk = X;\
  1.3011 +          break;\
  1.3012 +        }\
  1.3013 +        else {\
  1.3014 +          CORRUPTION_ERROR_ACTION(M);\
  1.3015 +          break;\
  1.3016 +        }\
  1.3017 +      }\
  1.3018 +      else {\
  1.3019 +        tchunkptr F = T->fd;\
  1.3020 +        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
  1.3021 +          T->fd = F->bk = X;\
  1.3022 +          X->fd = F;\
  1.3023 +          X->bk = T;\
  1.3024 +          X->parent = 0;\
  1.3025 +          break;\
  1.3026 +        }\
  1.3027 +        else {\
  1.3028 +          CORRUPTION_ERROR_ACTION(M);\
  1.3029 +          break;\
  1.3030 +        }\
  1.3031 +      }\
  1.3032 +    }\
  1.3033 +  }\
  1.3034 +}
  1.3035 +
  1.3036 +/*
  1.3037 +  Unlink steps:
  1.3038 +
  1.3039 +  1. If x is a chained node, unlink it from its same-sized fd/bk links
  1.3040 +     and choose its bk node as its replacement.
  1.3041 +  2. If x was the last node of its size, but not a leaf node, it must
  1.3042 +     be replaced with a leaf node (not merely one with an open left or
  1.3043 +     right), to make sure that lefts and rights of descendents
  1.3044 +     correspond properly to bit masks.  We use the rightmost descendent
  1.3045 +     of x.  We could use any other leaf, but this is easy to locate and
  1.3046 +     tends to counteract removal of leftmosts elsewhere, and so keeps
  1.3047 +     paths shorter than minimally guaranteed.  This doesn't loop much
  1.3048 +     because on average a node in a tree is near the bottom.
  1.3049 +  3. If x is the base of a chain (i.e., has parent links) relink
  1.3050 +     x's parent and children to x's replacement (or null if none).
  1.3051 +*/
  1.3052 +
  1.3053 +#define unlink_large_chunk(M, X) {\
  1.3054 +  tchunkptr XP = X->parent;\
  1.3055 +  tchunkptr R;\
  1.3056 +  if (X->bk != X) {\
  1.3057 +    tchunkptr F = X->fd;\
  1.3058 +    R = X->bk;\
  1.3059 +    if (RTCHECK(ok_address(M, F))) {\
  1.3060 +      F->bk = R;\
  1.3061 +      R->fd = F;\
  1.3062 +    }\
  1.3063 +    else {\
  1.3064 +      CORRUPTION_ERROR_ACTION(M);\
  1.3065 +    }\
  1.3066 +  }\
  1.3067 +  else {\
  1.3068 +    tchunkptr* RP;\
  1.3069 +    if (((R = *(RP = &(X->child[1]))) != 0) ||\
  1.3070 +        ((R = *(RP = &(X->child[0]))) != 0)) {\
  1.3071 +      tchunkptr* CP;\
  1.3072 +      while ((*(CP = &(R->child[1])) != 0) ||\
  1.3073 +             (*(CP = &(R->child[0])) != 0)) {\
  1.3074 +        R = *(RP = CP);\
  1.3075 +      }\
  1.3076 +      if (RTCHECK(ok_address(M, RP)))\
  1.3077 +        *RP = 0;\
  1.3078 +      else {\
  1.3079 +        CORRUPTION_ERROR_ACTION(M);\
  1.3080 +      }\
  1.3081 +    }\
  1.3082 +  }\
  1.3083 +  if (XP != 0) {\
  1.3084 +    tbinptr* H = treebin_at(M, X->index);\
  1.3085 +    if (X == *H) {\
  1.3086 +      if ((*H = R) == 0) \
  1.3087 +        clear_treemap(M, X->index);\
  1.3088 +    }\
  1.3089 +    else if (RTCHECK(ok_address(M, XP))) {\
  1.3090 +      if (XP->child[0] == X) \
  1.3091 +        XP->child[0] = R;\
  1.3092 +      else \
  1.3093 +        XP->child[1] = R;\
  1.3094 +    }\
  1.3095 +    else\
  1.3096 +      CORRUPTION_ERROR_ACTION(M);\
  1.3097 +    if (R != 0) {\
  1.3098 +      if (RTCHECK(ok_address(M, R))) {\
  1.3099 +        tchunkptr C0, C1;\
  1.3100 +        R->parent = XP;\
  1.3101 +        if ((C0 = X->child[0]) != 0) {\
  1.3102 +          if (RTCHECK(ok_address(M, C0))) {\
  1.3103 +            R->child[0] = C0;\
  1.3104 +            C0->parent = R;\
  1.3105 +          }\
  1.3106 +          else\
  1.3107 +            CORRUPTION_ERROR_ACTION(M);\
  1.3108 +        }\
  1.3109 +        if ((C1 = X->child[1]) != 0) {\
  1.3110 +          if (RTCHECK(ok_address(M, C1))) {\
  1.3111 +            R->child[1] = C1;\
  1.3112 +            C1->parent = R;\
  1.3113 +          }\
  1.3114 +          else\
  1.3115 +            CORRUPTION_ERROR_ACTION(M);\
  1.3116 +        }\
  1.3117 +      }\
  1.3118 +      else\
  1.3119 +        CORRUPTION_ERROR_ACTION(M);\
  1.3120 +    }\
  1.3121 +  }\
  1.3122 +}
  1.3123 +
  1.3124 +/* Relays to large vs small bin operations */
  1.3125 +
  1.3126 +#define insert_chunk(M, P, S)\
  1.3127 +  if (is_small(S)) insert_small_chunk(M, P, S)\
  1.3128 +  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
  1.3129 +
  1.3130 +#define unlink_chunk(M, P, S)\
  1.3131 +  if (is_small(S)) unlink_small_chunk(M, P, S)\
  1.3132 +  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
  1.3133 +
  1.3134 +
  1.3135 +/* Relays to internal calls to malloc/free from realloc, memalign etc */
  1.3136 +
  1.3137 +#if ONLY_MSPACES
  1.3138 +#define internal_malloc(m, b) mspace_malloc(m, b)
  1.3139 +#define internal_free(m, mem) mspace_free(m,mem);
  1.3140 +#else /* ONLY_MSPACES */
  1.3141 +#if MSPACES
  1.3142 +#define internal_malloc(m, b)\
  1.3143 +   (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
  1.3144 +#define internal_free(m, mem)\
  1.3145 +   if (m == gm) dlfree(mem); else mspace_free(m,mem);
  1.3146 +#else /* MSPACES */
  1.3147 +#define internal_malloc(m, b) dlmalloc(b)
  1.3148 +#define internal_free(m, mem) dlfree(mem)
  1.3149 +#endif /* MSPACES */
  1.3150 +#endif /* ONLY_MSPACES */
  1.3151 +
  1.3152 +/* -----------------------  Direct-mmapping chunks ----------------------- */
  1.3153 +
  1.3154 +/*
  1.3155 +  Directly mmapped chunks are set up with an offset to the start of
  1.3156 +  the mmapped region stored in the prev_foot field of the chunk. This
  1.3157 +  allows reconstruction of the required argument to MUNMAP when freed,
  1.3158 +  and also allows adjustment of the returned chunk to meet alignment
  1.3159 +  requirements (especially in memalign).  There is also enough space
  1.3160 +  allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
  1.3161 +  the PINUSE bit so frees can be checked.
  1.3162 +*/
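/* Concretely, mmap_alloc below places the chunk 'offset' bytes into the
   mmapped region, records (offset | IS_MMAPPED_BIT) in its prev_foot, and
   terminates the region with a FENCEPOST_HEAD chunk followed by a zero head
   word, so that a later free() can validate the chunk and recompute the base
   address and length to pass to MUNMAP. */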
  1.3163 +
  1.3164 +/* Malloc using mmap */
  1.3165 +static void* mmap_alloc(mstate m, size_t nb) {
  1.3166 +  size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  1.3167 +  if (mmsize > nb) {     /* Check for wrap around 0 */
  1.3168 +    char* mm = (char*)(DIRECT_MMAP(mmsize));
  1.3169 +    if (mm != CMFAIL) {
  1.3170 +      size_t offset = align_offset(chunk2mem(mm));
  1.3171 +      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  1.3172 +      mchunkptr p = (mchunkptr)(mm + offset);
  1.3173 +      p->prev_foot = offset | IS_MMAPPED_BIT;
  1.3174 +      (p)->head = (psize|CINUSE_BIT);
  1.3175 +      mark_inuse_foot(m, p, psize);
  1.3176 +      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  1.3177 +      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
  1.3178 +
  1.3179 +      if (mm < m->least_addr)
  1.3180 +        m->least_addr = mm;
  1.3181 +      if ((m->footprint += mmsize) > m->max_footprint)
  1.3182 +        m->max_footprint = m->footprint;
  1.3183 +      assert(is_aligned(chunk2mem(p)));
  1.3184 +      check_mmapped_chunk(m, p);
  1.3185 +      return chunk2mem(p);
  1.3186 +    }
  1.3187 +  }
  1.3188 +  return 0;
  1.3189 +}
  1.3190 +
  1.3191 +/* Realloc using mmap */
  1.3192 +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
  1.3193 +  size_t oldsize = chunksize(oldp);
  1.3194 +  if (is_small(nb)) /* Can't shrink mmap regions below small size */
  1.3195 +    return 0;
  1.3196 +  /* Keep old chunk if big enough but not too big */
  1.3197 +  if (oldsize >= nb + SIZE_T_SIZE &&
  1.3198 +      (oldsize - nb) <= (mparams.granularity << 1))
  1.3199 +    return oldp;
  1.3200 +  else {
  1.3201 +    size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
  1.3202 +    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
  1.3203 +    size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
  1.3204 +                                         CHUNK_ALIGN_MASK);
  1.3205 +    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
  1.3206 +                                  oldmmsize, newmmsize, 1);
  1.3207 +    if (cp != CMFAIL) {
  1.3208 +      mchunkptr newp = (mchunkptr)(cp + offset);
  1.3209 +      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
  1.3210 +      newp->head = (psize|CINUSE_BIT);
  1.3211 +      mark_inuse_foot(m, newp, psize);
  1.3212 +      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
  1.3213 +      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
  1.3214 +
  1.3215 +      if (cp < m->least_addr)
  1.3216 +        m->least_addr = cp;
  1.3217 +      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
  1.3218 +        m->max_footprint = m->footprint;
  1.3219 +      check_mmapped_chunk(m, newp);
  1.3220 +      return newp;
  1.3221 +    }
  1.3222 +  }
  1.3223 +  return 0;
  1.3224 +}
  1.3225 +
  1.3226 +/* -------------------------- mspace management -------------------------- */
  1.3227 +
  1.3228 +/* Initialize top chunk and its size */
  1.3229 +static void init_top(mstate m, mchunkptr p, size_t psize) {
  1.3230 +  /* Ensure alignment */
  1.3231 +  size_t offset = align_offset(chunk2mem(p));
  1.3232 +  p = (mchunkptr)((char*)p + offset);
  1.3233 +  psize -= offset;
  1.3234 +
  1.3235 +  m->top = p;
  1.3236 +  m->topsize = psize;
  1.3237 +  p->head = psize | PINUSE_BIT;
  1.3238 +  /* set size of fake trailing chunk holding overhead space only once */
  1.3239 +  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  1.3240 +  m->trim_check = mparams.trim_threshold; /* reset on each update */
  1.3241 +}
  1.3242 +
  1.3243 +/* Initialize bins for a new mstate that is otherwise zeroed out */
  1.3244 +static void init_bins(mstate m) {
  1.3245 +  /* Establish circular links for smallbins */
  1.3246 +  bindex_t i;
  1.3247 +  for (i = 0; i < NSMALLBINS; ++i) {
  1.3248 +    sbinptr bin = smallbin_at(m,i);
  1.3249 +    bin->fd = bin->bk = bin;
  1.3250 +  }
  1.3251 +}
  1.3252 +
  1.3253 +#if PROCEED_ON_ERROR
  1.3254 +
  1.3255 +/* default corruption action */
  1.3256 +static void reset_on_error(mstate m) {
  1.3257 +  int i;
  1.3258 +  ++malloc_corruption_error_count;
  1.3259 +  /* Reinitialize fields to forget about all memory */
  1.3260 +  m->smallbins = m->treebins = 0;
  1.3261 +  m->dvsize = m->topsize = 0;
  1.3262 +  m->seg.base = 0;
  1.3263 +  m->seg.size = 0;
  1.3264 +  m->seg.next = 0;
  1.3265 +  m->top = m->dv = 0;
  1.3266 +  for (i = 0; i < NTREEBINS; ++i)
  1.3267 +    *treebin_at(m, i) = 0;
  1.3268 +  init_bins(m);
  1.3269 +}
  1.3270 +#endif /* PROCEED_ON_ERROR */
  1.3271 +
  1.3272 +/* Allocate a chunk from newbase; merge the remainder into the first chunk at the old (successor) base. */
  1.3273 +static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
  1.3274 +                           size_t nb) {
  1.3275 +  mchunkptr p = align_as_chunk(newbase);
  1.3276 +  mchunkptr oldfirst = align_as_chunk(oldbase);
  1.3277 +  size_t psize = (char*)oldfirst - (char*)p;
  1.3278 +  mchunkptr q = chunk_plus_offset(p, nb);
  1.3279 +  size_t qsize = psize - nb;
  1.3280 +  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  1.3281 +
  1.3282 +  assert((char*)oldfirst > (char*)q);
  1.3283 +  assert(pinuse(oldfirst));
  1.3284 +  assert(qsize >= MIN_CHUNK_SIZE);
  1.3285 +
  1.3286 +  /* consolidate remainder with first chunk of old base */
  1.3287 +  if (oldfirst == m->top) {
  1.3288 +    size_t tsize = m->topsize += qsize;
  1.3289 +    m->top = q;
  1.3290 +    q->head = tsize | PINUSE_BIT;
  1.3291 +    check_top_chunk(m, q);
  1.3292 +  }
  1.3293 +  else if (oldfirst == m->dv) {
  1.3294 +    size_t dsize = m->dvsize += qsize;
  1.3295 +    m->dv = q;
  1.3296 +    set_size_and_pinuse_of_free_chunk(q, dsize);
  1.3297 +  }
  1.3298 +  else {
  1.3299 +    if (!cinuse(oldfirst)) {
  1.3300 +      size_t nsize = chunksize(oldfirst);
  1.3301 +      unlink_chunk(m, oldfirst, nsize);
  1.3302 +      oldfirst = chunk_plus_offset(oldfirst, nsize);
  1.3303 +      qsize += nsize;
  1.3304 +    }
  1.3305 +    set_free_with_pinuse(q, qsize, oldfirst);
  1.3306 +    insert_chunk(m, q, qsize);
  1.3307 +    check_free_chunk(m, q);
  1.3308 +  }
  1.3309 +
  1.3310 +  check_malloced_chunk(m, chunk2mem(p), nb);
  1.3311 +  return chunk2mem(p);
  1.3312 +}
  1.3313 +
  1.3314 +
  1.3315 +/* Add a segment to hold a new noncontiguous region */
  1.3316 +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  1.3317 +  /* Determine locations and sizes of segment, fenceposts, old top */
  1.3318 +  char* old_top = (char*)m->top;
  1.3319 +  msegmentptr oldsp = segment_holding(m, old_top);
  1.3320 +  char* old_end = oldsp->base + oldsp->size;
  1.3321 +  size_t ssize = pad_request(sizeof(struct malloc_segment));
  1.3322 +  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  1.3323 +  size_t offset = align_offset(chunk2mem(rawsp));
  1.3324 +  char* asp = rawsp + offset;
  1.3325 +  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  1.3326 +  mchunkptr sp = (mchunkptr)csp;
  1.3327 +  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  1.3328 +  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  1.3329 +  mchunkptr p = tnext;
  1.3330 +  int nfences = 0;
  1.3331 +
  1.3332 +  /* reset top to new space */
  1.3333 +  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  1.3334 +
  1.3335 +  /* Set up segment record */
  1.3336 +  assert(is_aligned(ss));
  1.3337 +  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  1.3338 +  *ss = m->seg; /* Push current record */
  1.3339 +  m->seg.base = tbase;
  1.3340 +  m->seg.size = tsize;
  1.3341 +  m->seg.sflags = mmapped;
  1.3342 +  m->seg.next = ss;
  1.3343 +
  1.3344 +  /* Insert trailing fenceposts */
  1.3345 +  for (;;) {
  1.3346 +    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  1.3347 +    p->head = FENCEPOST_HEAD;
  1.3348 +    ++nfences;
  1.3349 +    if ((char*)(&(nextp->head)) < old_end)
  1.3350 +      p = nextp;
  1.3351 +    else
  1.3352 +      break;
  1.3353 +  }
  1.3354 +  assert(nfences >= 2);
  1.3355 +
  1.3356 +  /* Insert the rest of old top into a bin as an ordinary free chunk */
  1.3357 +  if (csp != old_top) {
  1.3358 +    mchunkptr q = (mchunkptr)old_top;
  1.3359 +    size_t psize = csp - old_top;
  1.3360 +    mchunkptr tn = chunk_plus_offset(q, psize);
  1.3361 +    set_free_with_pinuse(q, psize, tn);
  1.3362 +    insert_chunk(m, q, psize);
  1.3363 +  }
  1.3364 +
  1.3365 +  check_top_chunk(m, m->top);
  1.3366 +}
  1.3367 +
  1.3368 +/* -------------------------- System allocation -------------------------- */
  1.3369 +
  1.3370 +/* Get memory from system using MORECORE or MMAP */
  1.3371 +static void* sys_alloc(mstate m, size_t nb) {
  1.3372 +  char* tbase = CMFAIL;
  1.3373 +  size_t tsize = 0;
  1.3374 +  flag_t mmap_flag = 0;
  1.3375 +
  1.3376 +  init_mparams();
  1.3377 +
  1.3378 +  /* Directly map large chunks */
  1.3379 +  if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  1.3380 +    void* mem = mmap_alloc(m, nb);
  1.3381 +    if (mem != 0)
  1.3382 +      return mem;
  1.3383 +  }
  1.3384 +
  1.3385 +  /*
  1.3386 +    Try getting memory in any of three ways (in most-preferred to
  1.3387 +    least-preferred order):
  1.3388 +    1. A call to MORECORE that can normally contiguously extend memory.
  1.3389 +       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE
  1.3390 +       or main space is mmapped or a previous contiguous call failed)
  1.3391 +    2. A call to MMAP new space (disabled if not HAVE_MMAP).
  1.3392 +       Note that under the default settings, if MORECORE is unable to
  1.3393 +       fulfill a request, and HAVE_MMAP is true, then mmap is
  1.3394 +       used as a noncontiguous system allocator. This is a useful backup
  1.3395 +       strategy for systems with holes in address spaces -- in this case
  1.3396 +       sbrk cannot contiguously expand the heap, but mmap may be able to
  1.3397 +       find space.
  1.3398 +    3. A call to MORECORE that cannot usually contiguously extend memory.
  1.3399 +       (disabled if not HAVE_MORECORE)
  1.3400 +  */
  1.3401 +
  1.3402 +  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  1.3403 +    char* br = CMFAIL;
  1.3404 +    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
  1.3405 +    size_t asize = 0;
  1.3406 +    ACQUIRE_MORECORE_LOCK();
  1.3407 +
  1.3408 +    if (ss == 0) {  /* First time through or recovery */
  1.3409 +      char* base = (char*)CALL_MORECORE(0);
  1.3410 +      if (base != CMFAIL) {
  1.3411 +        asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  1.3412 +        /* Adjust to end on a page boundary */
  1.3413 +        if (!is_page_aligned(base))
  1.3414 +          asize += (page_align((size_t)base) - (size_t)base);
  1.3415 +        /* Can't call MORECORE if size is negative when treated as signed */
  1.3416 +        if (asize < HALF_MAX_SIZE_T &&
  1.3417 +            (br = (char*)(CALL_MORECORE(asize))) == base) {
  1.3418 +          tbase = base;
  1.3419 +          tsize = asize;
  1.3420 +        }
  1.3421 +      }
  1.3422 +    }
  1.3423 +    else {
  1.3424 +      /* Subtract out existing available top space from MORECORE request. */
  1.3425 +      asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
  1.3426 +      /* Use mem here only if it did contiguously extend old space */
  1.3427 +      if (asize < HALF_MAX_SIZE_T &&
  1.3428 +          (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
  1.3429 +        tbase = br;
  1.3430 +        tsize = asize;
  1.3431 +      }
  1.3432 +    }
  1.3433 +
  1.3434 +    if (tbase == CMFAIL) {    /* Cope with partial failure */
  1.3435 +      if (br != CMFAIL) {    /* Try to use/extend the space we did get */
  1.3436 +        if (asize < HALF_MAX_SIZE_T &&
  1.3437 +            asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  1.3438 +          size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
  1.3439 +          if (esize < HALF_MAX_SIZE_T) {
  1.3440 +            char* end = (char*)CALL_MORECORE(esize);
  1.3441 +            if (end != CMFAIL)
  1.3442 +              asize += esize;
  1.3443 +            else {            /* Can't use; try to release */
  1.3444 +              end = (char*)CALL_MORECORE(-asize);
  1.3445 +              br = CMFAIL;
  1.3446 +            }
  1.3447 +          }
  1.3448 +        }
  1.3449 +      }
  1.3450 +      if (br != CMFAIL) {    /* Use the space we did get */
  1.3451 +        tbase = br;
  1.3452 +        tsize = asize;
  1.3453 +      }
  1.3454 +      else
  1.3455 +        disable_contiguous(m); /* Don't try contiguous path in the future */
  1.3456 +    }
  1.3457 +
  1.3458 +    RELEASE_MORECORE_LOCK();
  1.3459 +  }
  1.3460 +
  1.3461 +  if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
  1.3462 +    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
  1.3463 +    size_t rsize = granularity_align(req);
  1.3464 +    if (rsize > nb) { /* Fail if wraps around zero */
  1.3465 +      char* mp = (char*)(CALL_MMAP(rsize));
  1.3466 +      if (mp != CMFAIL) {
  1.3467 +        tbase = mp;
  1.3468 +        tsize = rsize;
  1.3469 +        mmap_flag = IS_MMAPPED_BIT;
  1.3470 +      }
  1.3471 +    }
  1.3472 +  }
  1.3473 +
  1.3474 +  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
  1.3475 +    size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  1.3476 +    if (asize < HALF_MAX_SIZE_T) {
  1.3477 +      char* br = CMFAIL;
  1.3478 +      char* end = CMFAIL;
  1.3479 +      ACQUIRE_MORECORE_LOCK();
  1.3480 +      br = (char*)(CALL_MORECORE(asize));
  1.3481 +      end = (char*)(CALL_MORECORE(0));
  1.3482 +      RELEASE_MORECORE_LOCK();
  1.3483 +      if (br != CMFAIL && end != CMFAIL && br < end) {
  1.3484 +        size_t ssize = end - br;
  1.3485 +        if (ssize > nb + TOP_FOOT_SIZE) {
  1.3486 +          tbase = br;
  1.3487 +          tsize = ssize;
  1.3488 +        }
  1.3489 +      }
  1.3490 +    }
  1.3491 +  }
  1.3492 +
  1.3493 +  if (tbase != CMFAIL) {
  1.3494 +
  1.3495 +    if ((m->footprint += tsize) > m->max_footprint)
  1.3496 +      m->max_footprint = m->footprint;
  1.3497 +
  1.3498 +    if (!is_initialized(m)) { /* first-time initialization */
  1.3499 +      m->seg.base = m->least_addr = tbase;
  1.3500 +      m->seg.size = tsize;
  1.3501 +      m->seg.sflags = mmap_flag;
  1.3502 +      m->magic = mparams.magic;
  1.3503 +      init_bins(m);
  1.3504 +      if (is_global(m)) 
  1.3505 +        init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  1.3506 +      else {
  1.3507 +        /* Offset top by embedded malloc_state */
  1.3508 +        mchunkptr mn = next_chunk(mem2chunk(m));
  1.3509 +        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
  1.3510 +      }
  1.3511 +    }
  1.3512 +
  1.3513 +    else {
  1.3514 +      /* Try to merge with an existing segment */
  1.3515 +      msegmentptr sp = &m->seg;
  1.3516 +      while (sp != 0 && tbase != sp->base + sp->size)
  1.3517 +        sp = sp->next;
  1.3518 +      if (sp != 0 &&
  1.3519 +          !is_extern_segment(sp) &&
  1.3520 +          (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
  1.3521 +          segment_holds(sp, m->top)) { /* append */
  1.3522 +        sp->size += tsize;
  1.3523 +        init_top(m, m->top, m->topsize + tsize);
  1.3524 +      }
  1.3525 +      else {
  1.3526 +        if (tbase < m->least_addr)
  1.3527 +          m->least_addr = tbase;
  1.3528 +        sp = &m->seg;
  1.3529 +        while (sp != 0 && sp->base != tbase + tsize)
  1.3530 +          sp = sp->next;
  1.3531 +        if (sp != 0 &&
  1.3532 +            !is_extern_segment(sp) &&
  1.3533 +            (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
  1.3534 +          char* oldbase = sp->base;
  1.3535 +          sp->base = tbase;
  1.3536 +          sp->size += tsize;
  1.3537 +          return prepend_alloc(m, tbase, oldbase, nb);
  1.3538 +        }
  1.3539 +        else
  1.3540 +          add_segment(m, tbase, tsize, mmap_flag);
  1.3541 +      }
  1.3542 +    }
  1.3543 +
  1.3544 +    if (nb < m->topsize) { /* Allocate from new or extended top space */
  1.3545 +      size_t rsize = m->topsize -= nb;
  1.3546 +      mchunkptr p = m->top;
  1.3547 +      mchunkptr r = m->top = chunk_plus_offset(p, nb);
  1.3548 +      r->head = rsize | PINUSE_BIT;
  1.3549 +      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  1.3550 +      check_top_chunk(m, m->top);
  1.3551 +      check_malloced_chunk(m, chunk2mem(p), nb);
  1.3552 +      return chunk2mem(p);
  1.3553 +    }
  1.3554 +  }
  1.3555 +
  1.3556 +  MALLOC_FAILURE_ACTION;
  1.3557 +  return 0;
  1.3558 +}
  1.3559 +
  1.3560 +/* -----------------------  system deallocation -------------------------- */
  1.3561 +
  1.3562 +/* Unmap and unlink any mmapped segments that don't contain used chunks */
  1.3563 +static size_t release_unused_segments(mstate m) {
  1.3564 +  size_t released = 0;
  1.3565 +  msegmentptr pred = &m->seg;
  1.3566 +  msegmentptr sp = pred->next;
  1.3567 +  while (sp != 0) {
  1.3568 +    char* base = sp->base;
  1.3569 +    size_t size = sp->size;
  1.3570 +    msegmentptr next = sp->next;
  1.3571 +    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
  1.3572 +      mchunkptr p = align_as_chunk(base);
  1.3573 +      size_t psize = chunksize(p);
  1.3574 +      /* Can unmap if first chunk holds entire segment and not pinned */
  1.3575 +      if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
  1.3576 +        tchunkptr tp = (tchunkptr)p;
  1.3577 +        assert(segment_holds(sp, (char*)sp));
  1.3578 +        if (p == m->dv) {
  1.3579 +          m->dv = 0;
  1.3580 +          m->dvsize = 0;
  1.3581 +        }
  1.3582 +        else {
  1.3583 +          unlink_large_chunk(m, tp);
  1.3584 +        }
  1.3585 +        if (CALL_MUNMAP(base, size) == 0) {
  1.3586 +          released += size;
  1.3587 +          m->footprint -= size;
  1.3588 +          /* unlink obsoleted record */
  1.3589 +          sp = pred;
  1.3590 +          sp->next = next;
  1.3591 +        }
  1.3592 +        else { /* back out if cannot unmap */
  1.3593 +          insert_large_chunk(m, tp, psize);
  1.3594 +        }
  1.3595 +      }
  1.3596 +    }
  1.3597 +    pred = sp;
  1.3598 +    sp = next;
  1.3599 +  }
  1.3600 +  return released;
  1.3601 +}
  1.3602 +
  1.3603 +static int sys_trim(mstate m, size_t pad) {
  1.3604 +  size_t released = 0;
  1.3605 +  if (pad < MAX_REQUEST && is_initialized(m)) {
  1.3606 +    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
  1.3607 +
  1.3608 +    if (m->topsize > pad) {
  1.3609 +      /* Shrink top space in granularity-size units, keeping at least one */
  1.3610 +      size_t unit = mparams.granularity;
  1.3611 +      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
  1.3612 +                      SIZE_T_ONE) * unit;
  1.3613 +      msegmentptr sp = segment_holding(m, (char*)m->top);
  1.3614 +
  1.3615 +      if (!is_extern_segment(sp)) {
  1.3616 +        if (is_mmapped_segment(sp)) {
  1.3617 +          if (HAVE_MMAP &&
  1.3618 +              sp->size >= extra &&
  1.3619 +              !has_segment_link(m, sp)) { /* can't shrink if pinned */
  1.3620 +            size_t newsize = sp->size - extra;
  1.3621 +            /* Prefer mremap, fall back to munmap */
  1.3622 +            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
  1.3623 +                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
  1.3624 +              released = extra;
  1.3625 +            }
  1.3626 +          }
  1.3627 +        }
  1.3628 +        else if (HAVE_MORECORE) {
  1.3629 +          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
  1.3630 +            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
  1.3631 +          ACQUIRE_MORECORE_LOCK();
  1.3632 +          {
  1.3633 +            /* Make sure end of memory is where we last set it. */
  1.3634 +            char* old_br = (char*)(CALL_MORECORE(0));
  1.3635 +            if (old_br == sp->base + sp->size) {
  1.3636 +              char* rel_br = (char*)(CALL_MORECORE(-extra));
  1.3637 +              char* new_br = (char*)(CALL_MORECORE(0));
  1.3638 +              if (rel_br != CMFAIL && new_br < old_br)
  1.3639 +                released = old_br - new_br;
  1.3640 +            }
  1.3641 +          }
  1.3642 +          RELEASE_MORECORE_LOCK();
  1.3643 +        }
  1.3644 +      }
  1.3645 +
  1.3646 +      if (released != 0) {
  1.3647 +        sp->size -= released;
  1.3648 +        m->footprint -= released;
  1.3649 +        init_top(m, m->top, m->topsize - released);
  1.3650 +        check_top_chunk(m, m->top);
  1.3651 +      }
  1.3652 +    }
  1.3653 +
  1.3654 +    /* Unmap any unused mmapped segments */
  1.3655 +    if (HAVE_MMAP) 
  1.3656 +      released += release_unused_segments(m);
  1.3657 +
  1.3658 +    /* On failure, disable autotrim to avoid repeated failed future calls */
  1.3659 +    if (released == 0)
  1.3660 +      m->trim_check = MAX_SIZE_T;
  1.3661 +  }
  1.3662 +
  1.3663 +  return (released != 0)? 1 : 0;
  1.3664 +}
  1.3665 +
  1.3666 +/* ---------------------------- malloc support --------------------------- */
  1.3667 +
  1.3668 +/* allocate a large request from the best fitting chunk in a treebin */
  1.3669 +static void* tmalloc_large(mstate m, size_t nb) {
  1.3670 +  tchunkptr v = 0;
  1.3671 +  size_t rsize = -nb; /* Unsigned negation */
  1.3672 +  tchunkptr t;
  1.3673 +  bindex_t idx;
  1.3674 +  compute_tree_index(nb, idx);
  1.3675 +
  1.3676 +  if ((t = *treebin_at(m, idx)) != 0) {
  1.3677 +    /* Traverse tree for this bin looking for node with size == nb */
  1.3678 +    size_t sizebits = nb << leftshift_for_tree_index(idx);
  1.3679 +    tchunkptr rst = 0;  /* The deepest untaken right subtree */
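         +    /* Editorial note: at each level the current top bit of sizebits
         +       selects child[0] or child[1], so the walk follows the position
         +       an exact-size node would occupy in this bin's trie. */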
  1.3680 +    for (;;) {
  1.3681 +      tchunkptr rt;
  1.3682 +      size_t trem = chunksize(t) - nb;
  1.3683 +      if (trem < rsize) {
  1.3684 +        v = t;
  1.3685 +        if ((rsize = trem) == 0)
  1.3686 +          break;
  1.3687 +      }
  1.3688 +      rt = t->child[1];
  1.3689 +      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  1.3690 +      if (rt != 0 && rt != t)
  1.3691 +        rst = rt;
  1.3692 +      if (t == 0) {
  1.3693 +        t = rst; /* set t to least subtree holding sizes > nb */
  1.3694 +        break;
  1.3695 +      }
  1.3696 +      sizebits <<= 1;
  1.3697 +    }
  1.3698 +  }
  1.3699 +
  1.3700 +  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
  1.3701 +    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
  1.3702 +    if (leftbits != 0) {
  1.3703 +      bindex_t i;
  1.3704 +      binmap_t leastbit = least_bit(leftbits);
  1.3705 +      compute_bit2idx(leastbit, i);
  1.3706 +      t = *treebin_at(m, i);
  1.3707 +    }
  1.3708 +  }
  1.3709 +
  1.3710 +  while (t != 0) { /* find smallest of tree or subtree */
  1.3711 +    size_t trem = chunksize(t) - nb;
  1.3712 +    if (trem < rsize) {
  1.3713 +      rsize = trem;
  1.3714 +      v = t;
  1.3715 +    }
  1.3716 +    t = leftmost_child(t);
  1.3717 +  }
  1.3718 +
  1.3719 +  /*  If dv is a better fit, return 0 so malloc will use it */
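         +  /* Editorial note: the comparison relies on unsigned wraparound --
         +     when dvsize < nb, (m->dvsize - nb) becomes a huge value, so any
         +     fit found in the treebins is accepted. */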
  1.3720 +  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
  1.3721 +    if (RTCHECK(ok_address(m, v))) { /* split */
  1.3722 +      mchunkptr r = chunk_plus_offset(v, nb);
  1.3723 +      assert(chunksize(v) == rsize + nb);
  1.3724 +      if (RTCHECK(ok_next(v, r))) {
  1.3725 +        unlink_large_chunk(m, v);
  1.3726 +        if (rsize < MIN_CHUNK_SIZE)
  1.3727 +          set_inuse_and_pinuse(m, v, (rsize + nb));
  1.3728 +        else {
  1.3729 +          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  1.3730 +          set_size_and_pinuse_of_free_chunk(r, rsize);
  1.3731 +          insert_chunk(m, r, rsize);
  1.3732 +        }
  1.3733 +        return chunk2mem(v);
  1.3734 +      }
  1.3735 +    }
  1.3736 +    CORRUPTION_ERROR_ACTION(m);
  1.3737 +  }
  1.3738 +  return 0;
  1.3739 +}
  1.3740 +
  1.3741 +/* allocate a small request from the best fitting chunk in a treebin */
  1.3742 +static void* tmalloc_small(mstate m, size_t nb) {
  1.3743 +  tchunkptr t, v;
  1.3744 +  size_t rsize;
  1.3745 +  bindex_t i;
  1.3746 +  binmap_t leastbit = least_bit(m->treemap);
  1.3747 +  compute_bit2idx(leastbit, i);
  1.3748 +
  1.3749 +  v = t = *treebin_at(m, i);
  1.3750 +  rsize = chunksize(t) - nb;
  1.3751 +
  1.3752 +  while ((t = leftmost_child(t)) != 0) {
  1.3753 +    size_t trem = chunksize(t) - nb;
  1.3754 +    if (trem < rsize) {
  1.3755 +      rsize = trem;
  1.3756 +      v = t;
  1.3757 +    }
  1.3758 +  }
  1.3759 +
  1.3760 +  if (RTCHECK(ok_address(m, v))) {
  1.3761 +    mchunkptr r = chunk_plus_offset(v, nb);
  1.3762 +    assert(chunksize(v) == rsize + nb);
  1.3763 +    if (RTCHECK(ok_next(v, r))) {
  1.3764 +      unlink_large_chunk(m, v);
  1.3765 +      if (rsize < MIN_CHUNK_SIZE)
  1.3766 +        set_inuse_and_pinuse(m, v, (rsize + nb));
  1.3767 +      else {
  1.3768 +        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  1.3769 +        set_size_and_pinuse_of_free_chunk(r, rsize);
  1.3770 +        replace_dv(m, r, rsize);
  1.3771 +      }
  1.3772 +      return chunk2mem(v);
  1.3773 +    }
  1.3774 +  }
  1.3775 +
  1.3776 +  CORRUPTION_ERROR_ACTION(m);
  1.3777 +  return 0;
  1.3778 +}
  1.3779 +
  1.3780 +/* --------------------------- realloc support --------------------------- */
  1.3781 +
  1.3782 +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
  1.3783 +  if (bytes >= MAX_REQUEST) {
  1.3784 +    MALLOC_FAILURE_ACTION;
  1.3785 +    return 0;
  1.3786 +  }
  1.3787 +  if (!PREACTION(m)) {
  1.3788 +    mchunkptr oldp = mem2chunk(oldmem);
  1.3789 +    size_t oldsize = chunksize(oldp);
  1.3790 +    mchunkptr next = chunk_plus_offset(oldp, oldsize);
  1.3791 +    mchunkptr newp = 0;
  1.3792 +    void* extra = 0;
  1.3793 +
  1.3794 +    /* Try to either shrink or extend into top. Else malloc-copy-free */
  1.3795 +
  1.3796 +    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
  1.3797 +                ok_next(oldp, next) && ok_pinuse(next))) {
  1.3798 +      size_t nb = request2size(bytes);
  1.3799 +      if (is_mmapped(oldp))
  1.3800 +        newp = mmap_resize(m, oldp, nb);
  1.3801 +      else if (oldsize >= nb) { /* already big enough */
  1.3802 +        size_t rsize = oldsize - nb;
  1.3803 +        newp = oldp;
  1.3804 +        if (rsize >= MIN_CHUNK_SIZE) {
  1.3805 +          mchunkptr remainder = chunk_plus_offset(newp, nb);
  1.3806 +          set_inuse(m, newp, nb);
  1.3807 +          set_inuse(m, remainder, rsize);
  1.3808 +          extra = chunk2mem(remainder);
  1.3809 +        }
  1.3810 +      }
  1.3811 +      else if (next == m->top && oldsize + m->topsize > nb) {
  1.3812 +        /* Expand into top */
  1.3813 +        size_t newsize = oldsize + m->topsize;
  1.3814 +        size_t newtopsize = newsize - nb;
  1.3815 +        mchunkptr newtop = chunk_plus_offset(oldp, nb);
  1.3816 +        set_inuse(m, oldp, nb);
  1.3817 +        newtop->head = newtopsize |PINUSE_BIT;
  1.3818 +        m->top = newtop;
  1.3819 +        m->topsize = newtopsize;
  1.3820 +        newp = oldp;
  1.3821 +      }
  1.3822 +    }
  1.3823 +    else {
  1.3824 +      USAGE_ERROR_ACTION(m, oldmem);
  1.3825 +      POSTACTION(m);
  1.3826 +      return 0;
  1.3827 +    }
  1.3828 +
  1.3829 +    POSTACTION(m);
  1.3830 +
  1.3831 +    if (newp != 0) {
  1.3832 +      if (extra != 0) {
  1.3833 +        internal_free(m, extra);
  1.3834 +      }
  1.3835 +      check_inuse_chunk(m, newp);
  1.3836 +      return chunk2mem(newp);
  1.3837 +    }
  1.3838 +    else {
  1.3839 +      void* newmem = internal_malloc(m, bytes);
  1.3840 +      if (newmem != 0) {
  1.3841 +        size_t oc = oldsize - overhead_for(oldp);
  1.3842 +        memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
  1.3843 +        internal_free(m, oldmem);
  1.3844 +      }
  1.3845 +      return newmem;
  1.3846 +    }
  1.3847 +  }
  1.3848 +  return 0;
  1.3849 +}
  1.3850 +
  1.3851 +/* --------------------------- memalign support -------------------------- */
  1.3852 +
  1.3853 +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  1.3854 +  if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
  1.3855 +    return internal_malloc(m, bytes);
  1.3856 +  if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
  1.3857 +    alignment = MIN_CHUNK_SIZE;
  1.3858 +  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
  1.3859 +    size_t a = MALLOC_ALIGNMENT << 1;
  1.3860 +    while (a < alignment) a <<= 1;
  1.3861 +    alignment = a;
  1.3862 +  }
  1.3863 +  
  1.3864 +  if (bytes >= MAX_REQUEST - alignment) {
  1.3865 +    if (m != 0)  { /* Test isn't needed but avoids compiler warning */
  1.3866 +      MALLOC_FAILURE_ACTION;
  1.3867 +    }
  1.3868 +  }
  1.3869 +  else {
  1.3870 +    size_t nb = request2size(bytes);
  1.3871 +    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
  1.3872 +    char* mem = (char*)internal_malloc(m, req);
  1.3873 +    if (mem != 0) {
  1.3874 +      void* leader = 0;
  1.3875 +      void* trailer = 0;
  1.3876 +      mchunkptr p = mem2chunk(mem);
  1.3877 +
  1.3878 +      if (PREACTION(m)) return 0;
  1.3879 +      if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
  1.3880 +        /*
  1.3881 +          Find an aligned spot inside chunk.  Since we need to give
  1.3882 +          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
  1.3883 +          the first calculation places us at a spot with less than
  1.3884 +          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
  1.3885 +          We've allocated enough total room so that this is always
  1.3886 +          possible.
  1.3887 +        */
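         +        /* Worked example (editorial illustration, assuming 4-byte size_t
         +           and the usual 8-byte chunk-to-mem offset): for alignment 64 and
         +           a chunk p at 0x1000, mem is 0x1008; rounding mem up gives
         +           0x1040, so br = 0x1038 and br - (char*)p = 56 >= MIN_CHUNK_SIZE,
         +           hence pos = br, the 56-byte leader at p is given back, and
         +           chunk2mem(newp) = 0x1040 is 64-byte aligned. */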
  1.3888 +        char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
  1.3889 +                                                       alignment -
  1.3890 +                                                       SIZE_T_ONE)) &
  1.3891 +                                             -alignment));
  1.3892 +        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
  1.3893 +          br : br+alignment;
  1.3894 +        mchunkptr newp = (mchunkptr)pos;
  1.3895 +        size_t leadsize = pos - (char*)(p);
  1.3896 +        size_t newsize = chunksize(p) - leadsize;
  1.3897 +
  1.3898 +        if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
  1.3899 +          newp->prev_foot = p->prev_foot + leadsize;
  1.3900 +          newp->head = (newsize|CINUSE_BIT);
  1.3901 +        }
  1.3902 +        else { /* Otherwise, give back leader, use the rest */
  1.3903 +          set_inuse(m, newp, newsize);
  1.3904 +          set_inuse(m, p, leadsize);
  1.3905 +          leader = chunk2mem(p);
  1.3906 +        }
  1.3907 +        p = newp;
  1.3908 +      }
  1.3909 +
  1.3910 +      /* Give back spare room at the end */
  1.3911 +      if (!is_mmapped(p)) {
  1.3912 +        size_t size = chunksize(p);
  1.3913 +        if (size > nb + MIN_CHUNK_SIZE) {
  1.3914 +          size_t remainder_size = size - nb;
  1.3915 +          mchunkptr remainder = chunk_plus_offset(p, nb);
  1.3916 +          set_inuse(m, p, nb);
  1.3917 +          set_inuse(m, remainder, remainder_size);
  1.3918 +          trailer = chunk2mem(remainder);
  1.3919 +        }
  1.3920 +      }
  1.3921 +
  1.3922 +      assert (chunksize(p) >= nb);
  1.3923 +      assert((((size_t)(chunk2mem(p))) % alignment) == 0);
  1.3924 +      check_inuse_chunk(m, p);
  1.3925 +      POSTACTION(m);
  1.3926 +      if (leader != 0) {
  1.3927 +        internal_free(m, leader);
  1.3928 +      }
  1.3929 +      if (trailer != 0) {
  1.3930 +        internal_free(m, trailer);
  1.3931 +      }
  1.3932 +      return chunk2mem(p);
  1.3933 +    }
  1.3934 +  }
  1.3935 +  return 0;
  1.3936 +}
  1.3937 +
  1.3938 +/* ------------------------ comalloc/coalloc support --------------------- */
  1.3939 +
  1.3940 +static void** ialloc(mstate m,
  1.3941 +                     size_t n_elements,
  1.3942 +                     size_t* sizes,
  1.3943 +                     int opts,
  1.3944 +                     void* chunks[]) {
  1.3945 +  /*
  1.3946 +    This provides common support for independent_X routines, handling
  1.3947 +    all of the combinations that can result.
  1.3948 +
  1.3949 +    The opts arg has:
  1.3950 +    bit 0 set if all elements are same size (using sizes[0])
  1.3951 +    bit 1 set if elements should be zeroed
  1.3952 +  */
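         +  /* For example, dlindependent_calloc() later in this file calls
         +     ialloc(gm, n_elements, &sz, 3, chunks) -- same-size elements,
         +     zeroed -- while dlindependent_comalloc() passes opts == 0 together
         +     with a per-element sizes[] array. */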
  1.3953 +
  1.3954 +  size_t    element_size;   /* chunksize of each element, if all same */
  1.3955 +  size_t    contents_size;  /* total size of elements */
  1.3956 +  size_t    array_size;     /* request size of pointer array */
  1.3957 +  void*     mem;            /* malloced aggregate space */
  1.3958 +  mchunkptr p;              /* corresponding chunk */
  1.3959 +  size_t    remainder_size; /* remaining bytes while splitting */
  1.3960 +  void**    marray;         /* either "chunks" or malloced ptr array */
  1.3961 +  mchunkptr array_chunk;    /* chunk for malloced ptr array */
  1.3962 +  flag_t    was_enabled;    /* to disable mmap */
  1.3963 +  size_t    size;
  1.3964 +  size_t    i;
  1.3965 +
  1.3966 +  /* compute array length, if needed */
  1.3967 +  if (chunks != 0) {
  1.3968 +    if (n_elements == 0)
  1.3969 +      return chunks; /* nothing to do */
  1.3970 +    marray = chunks;
  1.3971 +    array_size = 0;
  1.3972 +  }
  1.3973 +  else {
  1.3974 +    /* if empty req, must still return chunk representing empty array */
  1.3975 +    if (n_elements == 0)
  1.3976 +      return (void**)internal_malloc(m, 0);
  1.3977 +    marray = 0;
  1.3978 +    array_size = request2size(n_elements * (sizeof(void*)));
  1.3979 +  }
  1.3980 +
  1.3981 +  /* compute total element size */
  1.3982 +  if (opts & 0x1) { /* all-same-size */
  1.3983 +    element_size = request2size(*sizes);
  1.3984 +    contents_size = n_elements * element_size;
  1.3985 +  }
  1.3986 +  else { /* add up all the sizes */
  1.3987 +    element_size = 0;
  1.3988 +    contents_size = 0;
  1.3989 +    for (i = 0; i != n_elements; ++i)
  1.3990 +      contents_size += request2size(sizes[i]);
  1.3991 +  }
  1.3992 +
  1.3993 +  size = contents_size + array_size;
  1.3994 +
  1.3995 +  /*
  1.3996 +     Allocate the aggregate chunk.  First disable direct-mmapping so
  1.3997 +     malloc won't use it, since we would not be able to later
  1.3998 +     free/realloc space internal to a segregated mmap region.
  1.3999 +  */
  1.4000 +  was_enabled = use_mmap(m);
  1.4001 +  disable_mmap(m);
  1.4002 +  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  1.4003 +  if (was_enabled)
  1.4004 +    enable_mmap(m);
  1.4005 +  if (mem == 0)
  1.4006 +    return 0;
  1.4007 +
  1.4008 +  if (PREACTION(m)) return 0;
  1.4009 +  p = mem2chunk(mem);
  1.4010 +  remainder_size = chunksize(p);
  1.4011 +
  1.4012 +  assert(!is_mmapped(p));
  1.4013 +
  1.4014 +  if (opts & 0x2) {       /* optionally clear the elements */
  1.4015 +    memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  1.4016 +  }
  1.4017 +
  1.4018 +  /* If not provided, allocate the pointer array as final part of chunk */
  1.4019 +  if (marray == 0) {
  1.4020 +    size_t  array_chunk_size;
  1.4021 +    array_chunk = chunk_plus_offset(p, contents_size);
  1.4022 +    array_chunk_size = remainder_size - contents_size;
  1.4023 +    marray = (void**) (chunk2mem(array_chunk));
  1.4024 +    set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
  1.4025 +    remainder_size = contents_size;
  1.4026 +  }
  1.4027 +
  1.4028 +  /* split out elements */
  1.4029 +  for (i = 0; ; ++i) {
  1.4030 +    marray[i] = chunk2mem(p);
  1.4031 +    if (i != n_elements-1) {
  1.4032 +      if (element_size != 0)
  1.4033 +        size = element_size;
  1.4034 +      else
  1.4035 +        size = request2size(sizes[i]);
  1.4036 +      remainder_size -= size;
  1.4037 +      set_size_and_pinuse_of_inuse_chunk(m, p, size);
  1.4038 +      p = chunk_plus_offset(p, size);
  1.4039 +    }
  1.4040 +    else { /* the final element absorbs any overallocation slop */
  1.4041 +      set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
  1.4042 +      break;
  1.4043 +    }
  1.4044 +  }
  1.4045 +
  1.4046 +#if DEBUG
  1.4047 +  if (marray != chunks) {
  1.4048 +    /* final element must have exactly exhausted chunk */
  1.4049 +    if (element_size != 0) {
  1.4050 +      assert(remainder_size == element_size);
  1.4051 +    }
  1.4052 +    else {
  1.4053 +      assert(remainder_size == request2size(sizes[i]));
  1.4054 +    }
  1.4055 +    check_inuse_chunk(m, mem2chunk(marray));
  1.4056 +  }
  1.4057 +  for (i = 0; i != n_elements; ++i)
  1.4058 +    check_inuse_chunk(m, mem2chunk(marray[i]));
  1.4059 +
  1.4060 +#endif /* DEBUG */
  1.4061 +
  1.4062 +  POSTACTION(m);
  1.4063 +  return marray;
  1.4064 +}
  1.4065 +
  1.4066 +
  1.4067 +/* -------------------------- public routines ---------------------------- */
  1.4068 +
  1.4069 +#if !ONLY_MSPACES
  1.4070 +
  1.4071 +void* dlmalloc(size_t bytes) {
  1.4072 +  /*
  1.4073 +     Basic algorithm:
  1.4074 +     If a small request (< 256 bytes minus per-chunk overhead):
  1.4075 +       1. If one exists, use a remainderless chunk in associated smallbin.
  1.4076 +          (Remainderless means that there are too few excess bytes to
  1.4077 +          represent as a chunk.)
  1.4078 +       2. If it is big enough, use the dv chunk, which is normally the
  1.4079 +          chunk adjacent to the one used for the most recent small request.
  1.4080 +       3. If one exists, split the smallest available chunk in a bin,
  1.4081 +          saving remainder in dv.
  1.4082 +       4. If it is big enough, use the top chunk.
  1.4083 +       5. If available, get memory from system and use it
  1.4084 +     Otherwise, for a large request:
  1.4085 +       1. Find the smallest available binned chunk that fits, and use it
  1.4086 +          if it is better fitting than dv chunk, splitting if necessary.
  1.4087 +       2. If better fitting than any binned chunk, use the dv chunk.
  1.4088 +       3. If it is big enough, use the top chunk.
  1.4089 +       4. If request size >= mmap threshold, try to directly mmap this chunk.
  1.4090 +       5. If available, get memory from system and use it
  1.4091 +
  1.4092 +     The ugly goto's here ensure that postaction occurs along all paths.
  1.4093 +  */
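         +  /* Illustrative small-request trace (assuming the default 32-bit
         +     layout: 4-byte size_t, 8-byte alignment, no FOOTERS): a request of
         +     20 bytes pads to nb == 24 and small_index(24) == 3, so a chunk is
         +     taken whole from smallbin 3 (24 bytes) or 4 (32 bytes) if either
         +     is non-empty; otherwise the dv, remaining-bin, top, and system
         +     steps listed above are tried in turn. */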
  1.4094 +
  1.4095 +  if (!PREACTION(gm)) {
  1.4096 +    void* mem;
  1.4097 +    size_t nb;
  1.4098 +    if (bytes <= MAX_SMALL_REQUEST) {
  1.4099 +      bindex_t idx;
  1.4100 +      binmap_t smallbits;
  1.4101 +      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  1.4102 +      idx = small_index(nb);
  1.4103 +      smallbits = gm->smallmap >> idx;
  1.4104 +
  1.4105 +      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  1.4106 +        mchunkptr b, p;
  1.4107 +        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  1.4108 +        b = smallbin_at(gm, idx);
  1.4109 +        p = b->fd;
  1.4110 +        assert(chunksize(p) == small_index2size(idx));
  1.4111 +        unlink_first_small_chunk(gm, b, p, idx);
  1.4112 +        set_inuse_and_pinuse(gm, p, small_index2size(idx));
  1.4113 +        mem = chunk2mem(p);
  1.4114 +        check_malloced_chunk(gm, mem, nb);
  1.4115 +        goto postaction;
  1.4116 +      }
  1.4117 +
  1.4118 +      else if (nb > gm->dvsize) {
  1.4119 +        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  1.4120 +          mchunkptr b, p, r;
  1.4121 +          size_t rsize;
  1.4122 +          bindex_t i;
  1.4123 +          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  1.4124 +          binmap_t leastbit = least_bit(leftbits);
  1.4125 +          compute_bit2idx(leastbit, i);
  1.4126 +          b = smallbin_at(gm, i);
  1.4127 +          p = b->fd;
  1.4128 +          assert(chunksize(p) == small_index2size(i));
  1.4129 +          unlink_first_small_chunk(gm, b, p, i);
  1.4130 +          rsize = small_index2size(i) - nb;
  1.4131 +          /* Fit here cannot be remainderless if size_t is 4 bytes */
  1.4132 +          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  1.4133 +            set_inuse_and_pinuse(gm, p, small_index2size(i));
  1.4134 +          else {
  1.4135 +            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1.4136 +            r = chunk_plus_offset(p, nb);
  1.4137 +            set_size_and_pinuse_of_free_chunk(r, rsize);
  1.4138 +            replace_dv(gm, r, rsize);
  1.4139 +          }
  1.4140 +          mem = chunk2mem(p);
  1.4141 +          check_malloced_chunk(gm, mem, nb);
  1.4142 +          goto postaction;
  1.4143 +        }
  1.4144 +
  1.4145 +        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
  1.4146 +          check_malloced_chunk(gm, mem, nb);
  1.4147 +          goto postaction;
  1.4148 +        }
  1.4149 +      }
  1.4150 +    }
  1.4151 +    else if (bytes >= MAX_REQUEST)
  1.4152 +      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  1.4153 +    else {
  1.4154 +      nb = pad_request(bytes);
  1.4155 +      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  1.4156 +        check_malloced_chunk(gm, mem, nb);
  1.4157 +        goto postaction;
  1.4158 +      }
  1.4159 +    }
  1.4160 +
  1.4161 +    if (nb <= gm->dvsize) {
  1.4162 +      size_t rsize = gm->dvsize - nb;
  1.4163 +      mchunkptr p = gm->dv;
  1.4164 +      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  1.4165 +        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
  1.4166 +        gm->dvsize = rsize;
  1.4167 +        set_size_and_pinuse_of_free_chunk(r, rsize);
  1.4168 +        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1.4169 +      }
  1.4170 +      else { /* exhaust dv */
  1.4171 +        size_t dvs = gm->dvsize;
  1.4172 +        gm->dvsize = 0;
  1.4173 +        gm->dv = 0;
  1.4174 +        set_inuse_and_pinuse(gm, p, dvs);
  1.4175 +      }
  1.4176 +      mem = chunk2mem(p);
  1.4177 +      check_malloced_chunk(gm, mem, nb);
  1.4178 +      goto postaction;
  1.4179 +    }
  1.4180 +
  1.4181 +    else if (nb < gm->topsize) { /* Split top */
  1.4182 +      size_t rsize = gm->topsize -= nb;
  1.4183 +      mchunkptr p = gm->top;
  1.4184 +      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  1.4185 +      r->head = rsize | PINUSE_BIT;
  1.4186 +      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1.4187 +      mem = chunk2mem(p);
  1.4188 +      check_top_chunk(gm, gm->top);
  1.4189 +      check_malloced_chunk(gm, mem, nb);
  1.4190 +      goto postaction;
  1.4191 +    }
  1.4192 +
  1.4193 +    mem = sys_alloc(gm, nb);
  1.4194 +
  1.4195 +  postaction:
  1.4196 +    POSTACTION(gm);
  1.4197 +    return mem;
  1.4198 +  }
  1.4199 +
  1.4200 +  return 0;
  1.4201 +}
  1.4202 +
  1.4203 +void dlfree(void* mem) {
  1.4204 +  /*
  1.4205 +     Consolidate freed chunks with preceding or succeeding bordering
  1.4206 +     free chunks, if they exist, and then place in a bin.  Intermixed
  1.4207 +     with special cases for top, dv, mmapped chunks, and usage errors.
  1.4208 +  */
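         +  /* For example (editorial note): a freed chunk whose forward neighbor
         +     is the top chunk is merged into top (possibly triggering sys_trim),
         +     one bordering the dv chunk grows dv, and a directly mmapped chunk
         +     is returned to the system via CALL_MUNMAP. */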
  1.4209 +
  1.4210 +  if (mem != 0) {
  1.4211 +    mchunkptr p  = mem2chunk(mem);
  1.4212 +#if FOOTERS
  1.4213 +    mstate fm = get_mstate_for(p);
  1.4214 +    if (!ok_magic(fm)) {
  1.4215 +      USAGE_ERROR_ACTION(fm, p);
  1.4216 +      return;
  1.4217 +    }
  1.4218 +#else /* FOOTERS */
  1.4219 +#define fm gm
  1.4220 +#endif /* FOOTERS */
  1.4221 +    if (!PREACTION(fm)) {
  1.4222 +      check_inuse_chunk(fm, p);
  1.4223 +      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  1.4224 +        size_t psize = chunksize(p);
  1.4225 +        mchunkptr next = chunk_plus_offset(p, psize);
  1.4226 +        if (!pinuse(p)) {
  1.4227 +          size_t prevsize = p->prev_foot;
  1.4228 +          if ((prevsize & IS_MMAPPED_BIT) != 0) {
  1.4229 +            prevsize &= ~IS_MMAPPED_BIT;
  1.4230 +            psize += prevsize + MMAP_FOOT_PAD;
  1.4231 +            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  1.4232 +              fm->footprint -= psize;
  1.4233 +            goto postaction;
  1.4234 +          }
  1.4235 +          else {
  1.4236 +            mchunkptr prev = chunk_minus_offset(p, prevsize);
  1.4237 +            psize += prevsize;
  1.4238 +            p = prev;
  1.4239 +            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  1.4240 +              if (p != fm->dv) {
  1.4241 +                unlink_chunk(fm, p, prevsize);
  1.4242 +              }
  1.4243 +              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  1.4244 +                fm->dvsize = psize;
  1.4245 +                set_free_with_pinuse(p, psize, next);
  1.4246 +                goto postaction;
  1.4247 +              }
  1.4248 +            }
  1.4249 +            else
  1.4250 +              goto erroraction;
  1.4251 +          }
  1.4252 +        }
  1.4253 +
  1.4254 +        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  1.4255 +          if (!cinuse(next)) {  /* consolidate forward */
  1.4256 +            if (next == fm->top) {
  1.4257 +              size_t tsize = fm->topsize += psize;
  1.4258 +              fm->top = p;
  1.4259 +              p->head = tsize | PINUSE_BIT;
  1.4260 +              if (p == fm->dv) {
  1.4261 +                fm->dv = 0;
  1.4262 +                fm->dvsize = 0;
  1.4263 +              }
  1.4264 +              if (should_trim(fm, tsize))
  1.4265 +                sys_trim(fm, 0);
  1.4266 +              goto postaction;
  1.4267 +            }
  1.4268 +            else if (next == fm->dv) {
  1.4269 +              size_t dsize = fm->dvsize += psize;
  1.4270 +              fm->dv = p;
  1.4271 +              set_size_and_pinuse_of_free_chunk(p, dsize);
  1.4272 +              goto postaction;
  1.4273 +            }
  1.4274 +            else {
  1.4275 +              size_t nsize = chunksize(next);
  1.4276 +              psize += nsize;
  1.4277 +              unlink_chunk(fm, next, nsize);
  1.4278 +              set_size_and_pinuse_of_free_chunk(p, psize);
  1.4279 +              if (p == fm->dv) {
  1.4280 +                fm->dvsize = psize;
  1.4281 +                goto postaction;
  1.4282 +              }
  1.4283 +            }
  1.4284 +          }
  1.4285 +          else
  1.4286 +            set_free_with_pinuse(p, psize, next);
  1.4287 +          insert_chunk(fm, p, psize);
  1.4288 +          check_free_chunk(fm, p);
  1.4289 +          goto postaction;
  1.4290 +        }
  1.4291 +      }
  1.4292 +    erroraction:
  1.4293 +      USAGE_ERROR_ACTION(fm, p);
  1.4294 +    postaction:
  1.4295 +      POSTACTION(fm);
  1.4296 +    }
  1.4297 +  }
  1.4298 +#if !FOOTERS
  1.4299 +#undef fm
  1.4300 +#endif /* FOOTERS */
  1.4301 +}
  1.4302 +
  1.4303 +void* dlcalloc(size_t n_elements, size_t elem_size) {
  1.4304 +  void* mem;
  1.4305 +  size_t req = 0;
  1.4306 +  if (n_elements != 0) {
  1.4307 +    req = n_elements * elem_size;
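         +    /* Editorial note: the divide below is only performed when either
         +       operand needs more than 16 bits; two factors that each fit in
         +       16 bits cannot overflow the product. */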
  1.4308 +    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  1.4309 +        (req / n_elements != elem_size))
  1.4310 +      req = MAX_SIZE_T; /* force downstream failure on overflow */
  1.4311 +  }
  1.4312 +  mem = dlmalloc(req);
  1.4313 +  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  1.4314 +    memset(mem, 0, req);
  1.4315 +  return mem;
  1.4316 +}
  1.4317 +
  1.4318 +void* dlrealloc(void* oldmem, size_t bytes) {
  1.4319 +  if (oldmem == 0)
  1.4320 +    return dlmalloc(bytes);
  1.4321 +#ifdef REALLOC_ZERO_BYTES_FREES
  1.4322 +  if (bytes == 0) {
  1.4323 +    dlfree(oldmem);
  1.4324 +    return 0;
  1.4325 +  }
  1.4326 +#endif /* REALLOC_ZERO_BYTES_FREES */
  1.4327 +  else {
  1.4328 +#if ! FOOTERS
  1.4329 +    mstate m = gm;
  1.4330 +#else /* FOOTERS */
  1.4331 +    mstate m = get_mstate_for(mem2chunk(oldmem));
  1.4332 +    if (!ok_magic(m)) {
  1.4333 +      USAGE_ERROR_ACTION(m, oldmem);
  1.4334 +      return 0;
  1.4335 +    }
  1.4336 +#endif /* FOOTERS */
  1.4337 +    return internal_realloc(m, oldmem, bytes);
  1.4338 +  }
  1.4339 +}
  1.4340 +
  1.4341 +void* dlmemalign(size_t alignment, size_t bytes) {
  1.4342 +  return internal_memalign(gm, alignment, bytes);
  1.4343 +}
  1.4344 +
  1.4345 +void** dlindependent_calloc(size_t n_elements, size_t elem_size,
  1.4346 +                                 void* chunks[]) {
  1.4347 +  size_t sz = elem_size; /* serves as 1-element array */
  1.4348 +  return ialloc(gm, n_elements, &sz, 3, chunks);
  1.4349 +}
  1.4350 +
  1.4351 +void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
  1.4352 +                                   void* chunks[]) {
  1.4353 +  return ialloc(gm, n_elements, sizes, 0, chunks);
  1.4354 +}
  1.4355 +
  1.4356 +void* dlvalloc(size_t bytes) {
  1.4357 +  size_t pagesz;
  1.4358 +  init_mparams();
  1.4359 +  pagesz = mparams.page_size;
  1.4360 +  return dlmemalign(pagesz, bytes);
  1.4361 +}
  1.4362 +
  1.4363 +void* dlpvalloc(size_t bytes) {
  1.4364 +  size_t pagesz;
  1.4365 +  init_mparams();
  1.4366 +  pagesz = mparams.page_size;
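         +  /* The request is rounded up to a whole number of pages before being
         +     page-aligned; e.g. (illustrative) with 4K pages a 5000-byte request
         +     becomes 8192 bytes. */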
  1.4367 +  return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
  1.4368 +}
  1.4369 +
  1.4370 +int dlmalloc_trim(size_t pad) {
  1.4371 +  int result = 0;
  1.4372 +  if (!PREACTION(gm)) {
  1.4373 +    result = sys_trim(gm, pad);
  1.4374 +    POSTACTION(gm);
  1.4375 +  }
  1.4376 +  return result;
  1.4377 +}
  1.4378 +
  1.4379 +size_t dlmalloc_footprint(void) {
  1.4380 +  return gm->footprint;
  1.4381 +}
  1.4382 +
  1.4383 +size_t dlmalloc_max_footprint(void) {
  1.4384 +  return gm->max_footprint;
  1.4385 +}
  1.4386 +
  1.4387 +#if !NO_MALLINFO
  1.4388 +struct mallinfo dlmallinfo(void) {
  1.4389 +  return internal_mallinfo(gm);
  1.4390 +}
  1.4391 +#endif /* NO_MALLINFO */
  1.4392 +
  1.4393 +void dlmalloc_stats() {
  1.4394 +  internal_malloc_stats(gm);
  1.4395 +}
  1.4396 +
  1.4397 +size_t dlmalloc_usable_size(void* mem) {
  1.4398 +  if (mem != 0) {
  1.4399 +    mchunkptr p = mem2chunk(mem);
  1.4400 +    if (cinuse(p))
  1.4401 +      return chunksize(p) - overhead_for(p);
  1.4402 +  }
  1.4403 +  return 0;
  1.4404 +}
  1.4405 +
  1.4406 +int dlmallopt(int param_number, int value) {
  1.4407 +  return change_mparam(param_number, value);
  1.4408 +}
  1.4409 +
  1.4410 +#endif /* !ONLY_MSPACES */
  1.4411 +
  1.4412 +/* ----------------------------- user mspaces ---------------------------- */
  1.4413 +
  1.4414 +#if MSPACES
  1.4415 +
  1.4416 +static mstate init_user_mstate(char* tbase, size_t tsize) {
  1.4417 +  size_t msize = pad_request(sizeof(struct malloc_state));
  1.4418 +  mchunkptr mn;
  1.4419 +  mchunkptr msp = align_as_chunk(tbase);
  1.4420 +  mstate m = (mstate)(chunk2mem(msp));
  1.4421 +  memset(m, 0, msize);
  1.4422 +  INITIAL_LOCK(&m->mutex);
  1.4423 +  msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
  1.4424 +  m->seg.base = m->least_addr = tbase;
  1.4425 +  m->seg.size = m->footprint = m->max_footprint = tsize;
  1.4426 +  m->magic = mparams.magic;
  1.4427 +  m->mflags = mparams.default_mflags;
  1.4428 +  disable_contiguous(m);
  1.4429 +  init_bins(m);
  1.4430 +  mn = next_chunk(mem2chunk(m));
  1.4431 +  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  1.4432 +  check_top_chunk(m, m->top);
  1.4433 +  return m;
  1.4434 +}
  1.4435 +
  1.4436 +mspace create_mspace(size_t capacity, int locked) {
  1.4437 +  mstate m = 0;
  1.4438 +  size_t msize = pad_request(sizeof(struct malloc_state));
  1.4439 +  init_mparams(); /* Ensure pagesize etc initialized */
  1.4440 +
  1.4441 +  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  1.4442 +    size_t rs = ((capacity == 0)? mparams.granularity :
  1.4443 +                 (capacity + TOP_FOOT_SIZE + msize));
  1.4444 +    size_t tsize = granularity_align(rs);
  1.4445 +    char* tbase = (char*)(CALL_MMAP(tsize));
  1.4446 +    if (tbase != CMFAIL) {
  1.4447 +      m = init_user_mstate(tbase, tsize);
  1.4448 +      m->seg.sflags = IS_MMAPPED_BIT;
  1.4449 +      set_lock(m, locked);
  1.4450 +    }
  1.4451 +  }
  1.4452 +  return (mspace)m;
  1.4453 +}
  1.4454 +
  1.4455 +mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  1.4456 +  mstate m = 0;
  1.4457 +  size_t msize = pad_request(sizeof(struct malloc_state));
  1.4458 +  init_mparams(); /* Ensure pagesize etc initialized */
  1.4459 +
  1.4460 +  if (capacity > msize + TOP_FOOT_SIZE &&
  1.4461 +      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  1.4462 +    m = init_user_mstate((char*)base, capacity);
  1.4463 +    m->seg.sflags = EXTERN_BIT;
  1.4464 +    set_lock(m, locked);
  1.4465 +  }
  1.4466 +  return (mspace)m;
  1.4467 +}
  1.4468 +
  1.4469 +size_t destroy_mspace(mspace msp) {
  1.4470 +  size_t freed = 0;
  1.4471 +  mstate ms = (mstate)msp;
  1.4472 +  if (ok_magic(ms)) {
  1.4473 +    msegmentptr sp = &ms->seg;
  1.4474 +    while (sp != 0) {
  1.4475 +      char* base = sp->base;
  1.4476 +      size_t size = sp->size;
  1.4477 +      flag_t flag = sp->sflags;
  1.4478 +      sp = sp->next;
  1.4479 +      if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
  1.4480 +          CALL_MUNMAP(base, size) == 0)
  1.4481 +        freed += size;
  1.4482 +    }
  1.4483 +  }
  1.4484 +  else {
  1.4485 +    USAGE_ERROR_ACTION(ms,ms);
  1.4486 +  }
  1.4487 +  return freed;
  1.4488 +}
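         +
         +/*
         +  Usage sketch (editorial illustration, not part of the original code):
         +
         +    mspace msp = create_mspace(0, 0);    // default capacity, no locking
         +    void* p = mspace_malloc(msp, 128);
         +    mspace_free(msp, p);
         +    destroy_mspace(msp);
         +*/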
  1.4489 +
  1.4490 +/*
  1.4491 +  mspace versions of routines are near-clones of the global
  1.4492 +  versions. This is not so nice but better than the alternatives.
  1.4493 +*/
  1.4494 +
  1.4495 +
  1.4496 +void* mspace_malloc(mspace msp, size_t bytes) {
  1.4497 +  mstate ms = (mstate)msp;
  1.4498 +  if (!ok_magic(ms)) {
  1.4499 +    USAGE_ERROR_ACTION(ms,ms);
  1.4500 +    return 0;
  1.4501 +  }
  1.4502 +  if (!PREACTION(ms)) {
  1.4503 +    void* mem;
  1.4504 +    size_t nb;
  1.4505 +    if (bytes <= MAX_SMALL_REQUEST) {
  1.4506 +      bindex_t idx;
  1.4507 +      binmap_t smallbits;
  1.4508 +      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  1.4509 +      idx = small_index(nb);
  1.4510 +      smallbits = ms->smallmap >> idx;
  1.4511 +
  1.4512 +      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  1.4513 +        mchunkptr b, p;
  1.4514 +        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  1.4515 +        b = smallbin_at(ms, idx);
  1.4516 +        p = b->fd;
  1.4517 +        assert(chunksize(p) == small_index2size(idx));
  1.4518 +        unlink_first_small_chunk(ms, b, p, idx);
  1.4519 +        set_inuse_and_pinuse(ms, p, small_index2size(idx));
  1.4520 +        mem = chunk2mem(p);
  1.4521 +        check_malloced_chunk(ms, mem, nb);
  1.4522 +        goto postaction;
  1.4523 +      }
  1.4524 +
  1.4525 +      else if (nb > ms->dvsize) {
  1.4526 +        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  1.4527 +          mchunkptr b, p, r;
  1.4528 +          size_t rsize;
  1.4529 +          bindex_t i;
  1.4530 +          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  1.4531 +          binmap_t leastbit = least_bit(leftbits);
  1.4532 +          compute_bit2idx(leastbit, i);
  1.4533 +          b = smallbin_at(ms, i);
  1.4534 +          p = b->fd;
  1.4535 +          assert(chunksize(p) == small_index2size(i));
  1.4536 +          unlink_first_small_chunk(ms, b, p, i);
  1.4537 +          rsize = small_index2size(i) - nb;
  1.4538 +          /* Fit here cannot be remainderless if size_t is 4 bytes */
  1.4539 +          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  1.4540 +            set_inuse_and_pinuse(ms, p, small_index2size(i));
  1.4541 +          else {
  1.4542 +            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  1.4543 +            r = chunk_plus_offset(p, nb);
  1.4544 +            set_size_and_pinuse_of_free_chunk(r, rsize);
  1.4545 +            replace_dv(ms, r, rsize);
  1.4546 +          }
  1.4547 +          mem = chunk2mem(p);
  1.4548 +          check_malloced_chunk(ms, mem, nb);
  1.4549 +          goto postaction;
  1.4550 +        }
  1.4551 +
  1.4552 +        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
  1.4553 +          check_malloced_chunk(ms, mem, nb);
  1.4554 +          goto postaction;
  1.4555 +        }
  1.4556 +      }
  1.4557 +    }
  1.4558 +    else if (bytes >= MAX_REQUEST)
  1.4559 +      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
  1.4560 +    else {
  1.4561 +      nb = pad_request(bytes);
  1.4562 +      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
  1.4563 +        check_malloced_chunk(ms, mem, nb);
  1.4564 +        goto postaction;
  1.4565 +      }
  1.4566 +    }
  1.4567 +
  1.4568 +    if (nb <= ms->dvsize) {
  1.4569 +      size_t rsize = ms->dvsize - nb;
  1.4570 +      mchunkptr p = ms->dv;
  1.4571 +      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
  1.4572 +        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
  1.4573 +        ms->dvsize = rsize;
  1.4574 +        set_size_and_pinuse_of_free_chunk(r, rsize);
  1.4575 +        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  1.4576 +      }
  1.4577 +      else { /* exhaust dv */
  1.4578 +        size_t dvs = ms->dvsize;
  1.4579 +        ms->dvsize = 0;
  1.4580 +        ms->dv = 0;
  1.4581 +        set_inuse_and_pinuse(ms, p, dvs);
  1.4582 +      }
  1.4583 +      mem = chunk2mem(p);
  1.4584 +      check_malloced_chunk(ms, mem, nb);
  1.4585 +      goto postaction;
  1.4586 +    }
  1.4587 +
  1.4588 +    else if (nb < ms->topsize) { /* Split top */
  1.4589 +      size_t rsize = ms->topsize -= nb;
  1.4590 +      mchunkptr p = ms->top;
  1.4591 +      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
  1.4592 +      r->head = rsize | PINUSE_BIT;
  1.4593 +      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  1.4594 +      mem = chunk2mem(p);
  1.4595 +      check_top_chunk(ms, ms->top);
  1.4596 +      check_malloced_chunk(ms, mem, nb);
  1.4597 +      goto postaction;
  1.4598 +    }
  1.4599 +
  1.4600 +    mem = sys_alloc(ms, nb);
  1.4601 +
  1.4602 +  postaction:
  1.4603 +    POSTACTION(ms);
  1.4604 +    return mem;
  1.4605 +  }
  1.4606 +
  1.4607 +  return 0;
  1.4608 +}
  1.4609 +
  1.4610 +void mspace_free(mspace msp, void* mem) {
  1.4611 +  if (mem != 0) {
  1.4612 +    mchunkptr p  = mem2chunk(mem);
  1.4613 +#if FOOTERS
  1.4614 +    mstate fm = get_mstate_for(p);
  1.4615 +#else /* FOOTERS */
  1.4616 +    mstate fm = (mstate)msp;
  1.4617 +#endif /* FOOTERS */
  1.4618 +    if (!ok_magic(fm)) {
  1.4619 +      USAGE_ERROR_ACTION(fm, p);
  1.4620 +      return;
  1.4621 +    }
  1.4622 +    if (!PREACTION(fm)) {
  1.4623 +      check_inuse_chunk(fm, p);
  1.4624 +      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  1.4625 +        size_t psize = chunksize(p);
  1.4626 +        mchunkptr next = chunk_plus_offset(p, psize);
  1.4627 +        if (!pinuse(p)) {
  1.4628 +          size_t prevsize = p->prev_foot;
  1.4629 +          if ((prevsize & IS_MMAPPED_BIT) != 0) {
  1.4630 +            prevsize &= ~IS_MMAPPED_BIT;
  1.4631 +            psize += prevsize + MMAP_FOOT_PAD;
  1.4632 +            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  1.4633 +              fm->footprint -= psize;
  1.4634 +            goto postaction;
  1.4635 +          }
  1.4636 +          else {
  1.4637 +            mchunkptr prev = chunk_minus_offset(p, prevsize);
  1.4638 +            psize += prevsize;
  1.4639 +            p = prev;
  1.4640 +            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  1.4641 +              if (p != fm->dv) {
  1.4642 +                unlink_chunk(fm, p, prevsize);
  1.4643 +              }
  1.4644 +              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  1.4645 +                fm->dvsize = psize;
  1.4646 +                set_free_with_pinuse(p, psize, next);
  1.4647 +                goto postaction;
  1.4648 +              }
  1.4649 +            }
  1.4650 +            else
  1.4651 +              goto erroraction;
  1.4652 +          }
  1.4653 +        }
  1.4654 +
  1.4655 +        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  1.4656 +          if (!cinuse(next)) {  /* consolidate forward */
  1.4657 +            if (next == fm->top) {
  1.4658 +              size_t tsize = fm->topsize += psize;
  1.4659 +              fm->top = p;
  1.4660 +              p->head = tsize | PINUSE_BIT;
  1.4661 +              if (p == fm->dv) {
  1.4662 +                fm->dv = 0;
  1.4663 +                fm->dvsize = 0;
  1.4664 +              }
  1.4665 +              if (should_trim(fm, tsize))
  1.4666 +                sys_trim(fm, 0);
  1.4667 +              goto postaction;
  1.4668 +            }
  1.4669 +            else if (next == fm->dv) {
  1.4670 +              size_t dsize = fm->dvsize += psize;
  1.4671 +              fm->dv = p;
  1.4672 +              set_size_and_pinuse_of_free_chunk(p, dsize);
  1.4673 +              goto postaction;
  1.4674 +            }
  1.4675 +            else {
  1.4676 +              size_t nsize = chunksize(next);
  1.4677 +              psize += nsize;
  1.4678 +              unlink_chunk(fm, next, nsize);
  1.4679 +              set_size_and_pinuse_of_free_chunk(p, psize);
  1.4680 +              if (p == fm->dv) {
  1.4681 +                fm->dvsize = psize;
  1.4682 +                goto postaction;
  1.4683 +              }
  1.4684 +            }
  1.4685 +          }
  1.4686 +          else
  1.4687 +            set_free_with_pinuse(p, psize, next);
  1.4688 +          insert_chunk(fm, p, psize);
  1.4689 +          check_free_chunk(fm, p);
  1.4690 +          goto postaction;
  1.4691 +        }
  1.4692 +      }
  1.4693 +    erroraction:
  1.4694 +      USAGE_ERROR_ACTION(fm, p);
  1.4695 +    postaction:
  1.4696 +      POSTACTION(fm);
  1.4697 +    }
  1.4698 +  }
  1.4699 +}
  1.4700 +
  1.4701 +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  1.4702 +  void* mem;
  1.4703 +  size_t req = 0;
  1.4704 +  mstate ms = (mstate)msp;
  1.4705 +  if (!ok_magic(ms)) {
  1.4706 +    USAGE_ERROR_ACTION(ms,ms);
  1.4707 +    return 0;
  1.4708 +  }
  1.4709 +  if (n_elements != 0) {
  1.4710 +    req = n_elements * elem_size;
  1.4711 +    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  1.4712 +        (req / n_elements != elem_size))
  1.4713 +      req = MAX_SIZE_T; /* force downstream failure on overflow */
  1.4714 +  }
  1.4715 +  mem = internal_malloc(ms, req);
  1.4716 +  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  1.4717 +    memset(mem, 0, req);
  1.4718 +  return mem;
  1.4719 +}
  1.4720 +
  1.4721 +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  1.4722 +  if (oldmem == 0)
  1.4723 +    return mspace_malloc(msp, bytes);
  1.4724 +#ifdef REALLOC_ZERO_BYTES_FREES
  1.4725 +  if (bytes == 0) {
  1.4726 +    mspace_free(msp, oldmem);
  1.4727 +    return 0;
  1.4728 +  }
  1.4729 +#endif /* REALLOC_ZERO_BYTES_FREES */
  1.4730 +  else {
  1.4731 +#if FOOTERS
  1.4732 +    mchunkptr p  = mem2chunk(oldmem);
  1.4733 +    mstate ms = get_mstate_for(p);
  1.4734 +#else /* FOOTERS */
  1.4735 +    mstate ms = (mstate)msp;
  1.4736 +#endif /* FOOTERS */
  1.4737 +    if (!ok_magic(ms)) {
  1.4738 +      USAGE_ERROR_ACTION(ms,ms);
  1.4739 +      return 0;
  1.4740 +    }
  1.4741 +    return internal_realloc(ms, oldmem, bytes);
  1.4742 +  }
  1.4743 +}
  1.4744 +
  1.4745 +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  1.4746 +  mstate ms = (mstate)msp;
  1.4747 +  if (!ok_magic(ms)) {
  1.4748 +    USAGE_ERROR_ACTION(ms,ms);
  1.4749 +    return 0;
  1.4750 +  }
  1.4751 +  return internal_memalign(ms, alignment, bytes);
  1.4752 +}
  1.4753 +
  1.4754 +void** mspace_independent_calloc(mspace msp, size_t n_elements,
  1.4755 +                                 size_t elem_size, void* chunks[]) {
  1.4756 +  size_t sz = elem_size; /* serves as 1-element array */
  1.4757 +  mstate ms = (mstate)msp;
  1.4758 +  if (!ok_magic(ms)) {
  1.4759 +    USAGE_ERROR_ACTION(ms,ms);
  1.4760 +    return 0;
  1.4761 +  }
  1.4762 +  return ialloc(ms, n_elements, &sz, 3, chunks);
  1.4763 +}
  1.4764 +
  1.4765 +void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  1.4766 +                                   size_t sizes[], void* chunks[]) {
  1.4767 +  mstate ms = (mstate)msp;
  1.4768 +  if (!ok_magic(ms)) {
  1.4769 +    USAGE_ERROR_ACTION(ms,ms);
  1.4770 +    return 0;
  1.4771 +  }
  1.4772 +  return ialloc(ms, n_elements, sizes, 0, chunks);
  1.4773 +}
  1.4774 +
  1.4775 +int mspace_trim(mspace msp, size_t pad) {
  1.4776 +  int result = 0;
  1.4777 +  mstate ms = (mstate)msp;
  1.4778 +  if (ok_magic(ms)) {
  1.4779 +    if (!PREACTION(ms)) {
  1.4780 +      result = sys_trim(ms, pad);
  1.4781 +      POSTACTION(ms);
  1.4782 +    }
  1.4783 +  }
  1.4784 +  else {
  1.4785 +    USAGE_ERROR_ACTION(ms,ms);
  1.4786 +  }
  1.4787 +  return result;
  1.4788 +}
  1.4789 +
  1.4790 +void mspace_malloc_stats(mspace msp) {
  1.4791 +  mstate ms = (mstate)msp;
  1.4792 +  if (ok_magic(ms)) {
  1.4793 +    internal_malloc_stats(ms);
  1.4794 +  }
  1.4795 +  else {
  1.4796 +    USAGE_ERROR_ACTION(ms,ms);
  1.4797 +  }
  1.4798 +}
  1.4799 +
  1.4800 +size_t mspace_footprint(mspace msp) {
  1.4801 +  size_t result = 0;
  1.4802 +  mstate ms = (mstate)msp;
  1.4803 +  if (ok_magic(ms)) {
  1.4804 +    result = ms->footprint;
  1.4805 +  }
  1.4806 +  else {
         +    USAGE_ERROR_ACTION(ms,ms);
         +  }
  1.4807 +  return result;
  1.4808 +}
  1.4809 +
  1.4810 +
  1.4811 +size_t mspace_max_footprint(mspace msp) {
  1.4812 +  size_t result = 0;
  1.4813 +  mstate ms = (mstate)msp;
  1.4814 +  if (ok_magic(ms)) {
  1.4815 +    result = ms->max_footprint;
  1.4816 +  }
  1.4817 +  else {
         +    USAGE_ERROR_ACTION(ms,ms);
         +  }
  1.4818 +  return result;
  1.4819 +}
  1.4820 +
  1.4821 +
  1.4822 +#if !NO_MALLINFO
  1.4823 +struct mallinfo mspace_mallinfo(mspace msp) {
  1.4824 +  mstate ms = (mstate)msp;
  1.4825 +  if (!ok_magic(ms)) {
  1.4826 +    USAGE_ERROR_ACTION(ms,ms);
  1.4827 +  }
  1.4828 +  return internal_mallinfo(ms);
  1.4829 +}
  1.4830 +#endif /* NO_MALLINFO */
  1.4831 +
  1.4832 +int mspace_mallopt(int param_number, int value) {
  1.4833 +  return change_mparam(param_number, value);
  1.4834 +}
  1.4835 +
  1.4836 +#endif /* MSPACES */
  1.4837 +
  1.4838 +/* -------------------- Alternative MORECORE functions ------------------- */
  1.4839 +
  1.4840 +/*
  1.4841 +  Guidelines for creating a custom version of MORECORE:
  1.4842 +
  1.4843 +  * For best performance, MORECORE should allocate in multiples of pagesize.
  1.4844 +  * MORECORE may allocate more memory than requested. (Or even less,
  1.4845 +      but this will usually result in a malloc failure.)
  1.4846 +  * MORECORE must not allocate memory when given argument zero, but
  1.4847 +      instead return one past the end address of memory from previous
  1.4848 +      nonzero call.
  1.4849 +  * For best performance, consecutive calls to MORECORE with positive
  1.4850 +      arguments should return increasing addresses, indicating that
  1.4851 +      space has been contiguously extended.
  1.4852 +  * Even though consecutive calls to MORECORE need not return contiguous
  1.4853 +      addresses, it must be OK for malloc'ed chunks to span multiple
  1.4854 +      regions in those cases where they do happen to be contiguous.
  1.4855 +  * MORECORE need not handle negative arguments -- it may instead
  1.4856 +      just return MFAIL when given negative arguments.
  1.4857 +      Negative arguments are always multiples of pagesize. MORECORE
  1.4858 +      must not misinterpret negative args as large positive unsigned
  1.4859 +      args. You can suppress all such calls from even occurring by defining
  1.4860 +      MORECORE_CANNOT_TRIM.
  1.4861 +
  1.4862 +  As an example alternative MORECORE, here is a custom allocator
  1.4863 +  kindly contributed for pre-OSX macOS.  It uses virtually but not
  1.4864 +  necessarily physically contiguous non-paged memory (locked in,
  1.4865 +  present and won't get swapped out).  You can use it by uncommenting
  1.4866 +  this section, adding some #includes, and setting up the appropriate
  1.4867 +  defines above:
  1.4868 +
  1.4869 +      #define MORECORE osMoreCore
  1.4870 +
  1.4871 +  There is also a shutdown routine that should somehow be called for
  1.4872 +  cleanup upon program exit.
  1.4873 +
  1.4874 +  #define MAX_POOL_ENTRIES 100
  1.4875 +  #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
  1.4876 +  static int next_os_pool;
  1.4877 +  void *our_os_pools[MAX_POOL_ENTRIES];
  1.4878 +
  1.4879 +  void *osMoreCore(int size)
  1.4880 +  {
  1.4881 +    void *ptr = 0;
  1.4882 +    static void *sbrk_top = 0;
  1.4883 +
  1.4884 +    if (size > 0)
  1.4885 +    {
  1.4886 +      if (size < MINIMUM_MORECORE_SIZE)
  1.4887 +         size = MINIMUM_MORECORE_SIZE;
  1.4888 +      if (CurrentExecutionLevel() == kTaskLevel)
  1.4889 +         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
  1.4890 +      if (ptr == 0)
  1.4891 +      {
  1.4892 +        return (void *) MFAIL;
  1.4893 +      }
  1.4894 +      // save ptrs so they can be freed during cleanup
  1.4895 +      our_os_pools[next_os_pool] = ptr;
  1.4896 +      next_os_pool++;
  1.4897 +      ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
  1.4898 +      sbrk_top = (char *) ptr + size;
  1.4899 +      return ptr;
  1.4900 +    }
  1.4901 +    else if (size < 0)
  1.4902 +    {
  1.4903 +      // we don't currently support shrink behavior
  1.4904 +      return (void *) MFAIL;
  1.4905 +    }
  1.4906 +    else
  1.4907 +    {
  1.4908 +      return sbrk_top;
  1.4909 +    }
  1.4910 +  }
  1.4911 +
  1.4912 +  // cleanup any allocated memory pools
  1.4913 +  // called as last thing before shutting down driver
  1.4914 +
  1.4915 +  void osCleanupMem(void)
  1.4916 +  {
  1.4917 +    void **ptr;
  1.4918 +
  1.4919 +    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
  1.4920 +      if (*ptr)
  1.4921 +      {
  1.4922 +         PoolDeallocate(*ptr);
  1.4923 +         *ptr = 0;
  1.4924 +      }
  1.4925 +  }
  1.4926 +
  1.4927 +*/
  1.4928 +
  1.4929 +
  1.4930 +/* -----------------------------------------------------------------------
  1.4931 +History:
  1.4932 +    V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
  1.4933 +      * Add max_footprint functions
  1.4934 +      * Ensure all appropriate literals are size_t
  1.4935 +      * Fix conditional compilation problem for some #define settings
  1.4936 +      * Avoid concatenating segments with the one provided
  1.4937 +        in create_mspace_with_base
  1.4938 +      * Rename some variables to avoid compiler shadowing warnings
  1.4939 +      * Use explicit lock initialization.
  1.4940 +      * Better handling of sbrk interference.
  1.4941 +      * Simplify and fix segment insertion, trimming and mspace_destroy
  1.4942 +      * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
  1.4943 +      * Thanks especially to Dennis Flanagan for help on these.
  1.4944 +
  1.4945 +    V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
  1.4946 +      * Fix memalign brace error.
  1.4947 +
  1.4948 +    V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
  1.4949 +      * Fix improper #endif nesting in C++
  1.4950 +      * Add explicit casts needed for C++
  1.4951 +
  1.4952 +    V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
  1.4953 +      * Use trees for large bins
  1.4954 +      * Support mspaces
  1.4955 +      * Use segments to unify sbrk-based and mmap-based system allocation,
  1.4956 +        removing need for emulation on most platforms without sbrk.
  1.4957 +      * Default safety checks
  1.4958 +      * Optional footer checks. Thanks to William Robertson for the idea.
  1.4959 +      * Internal code refactoring
  1.4960 +      * Incorporate suggestions and platform-specific changes.
  1.4961 +        Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
  1.4962 +        Aaron Bachmann,  Emery Berger, and others.
  1.4963 +      * Speed up non-fastbin processing enough to remove fastbins.
  1.4964 +      * Remove useless cfree() to avoid conflicts with other apps.
  1.4965 +      * Remove internal memcpy, memset. Compilers handle builtins better.
  1.4966 +      * Remove some options that no one ever used and rename others.
  1.4967 +
  1.4968 +    V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
  1.4969 +      * Fix malloc_state bitmap array misdeclaration
  1.4970 +
  1.4971 +    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
  1.4972 +      * Allow tuning of FIRST_SORTED_BIN_SIZE
  1.4973 +      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
  1.4974 +      * Better detection and support for non-contiguousness of MORECORE.
  1.4975 +        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
  1.4976 +      * Bypass most of malloc if no frees. Thanks to Emery Berger.
  1.4977 +      * Fix freeing of old top non-contiguous chunk in sysmalloc.
  1.4978 +      * Raised default trim and map thresholds to 256K.
  1.4979 +      * Fix mmap-related #defines. Thanks to Lubos Lunak.
  1.4980 +      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
  1.4981 +      * Branch-free bin calculation
  1.4982 +      * Default trim and mmap thresholds now 256K.
  1.4983 +
  1.4984 +    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
  1.4985 +      * Introduce independent_comalloc and independent_calloc.
  1.4986 +        Thanks to Michael Pachos for motivation and help.
  1.4987 +      * Make optional .h file available
  1.4988 +      * Allow > 2GB requests on 32bit systems.
  1.4989 +      * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
  1.4990 +        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
  1.4991 +        and Anonymous.
  1.4992 +      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
  1.4993 +        helping test this.)
  1.4994 +      * memalign: check alignment arg
  1.4995 +      * realloc: don't try to shift chunks backwards, since this
  1.4996 +        leads to  more fragmentation in some programs and doesn't
  1.4997 +        seem to help in any others.
  1.4998 +      * Collect all cases in malloc requiring system memory into sysmalloc
  1.4999 +      * Use mmap as backup to sbrk
  1.5000 +      * Place all internal state in malloc_state
  1.5001 +      * Introduce fastbins (although similar to 2.5.1)
  1.5002 +      * Many minor tunings and cosmetic improvements
  1.5003 +      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
  1.5004 +      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
  1.5005 +        Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
  1.5006 +      * Include errno.h to support default failure action.
  1.5007 +
  1.5008 +    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
  1.5009 +      * return null for negative arguments
  1.5010 +      * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
  1.5011 +         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
  1.5012 +          (e.g. WIN32 platforms)
  1.5013 +         * Cleanup header file inclusion for WIN32 platforms
  1.5014 +         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
  1.5015 +         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
  1.5016 +           memory allocation routines
  1.5017 +         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
  1.5018 +         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
  1.5019 +           usage of 'assert' in non-WIN32 code
  1.5020 +         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
  1.5021 +           avoid infinite loop
  1.5022 +      * Always call 'fREe()' rather than 'free()'
  1.5023 +
  1.5024 +    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
  1.5025 +      * Fixed ordering problem with boundary-stamping
  1.5026 +
  1.5027 +    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
  1.5028 +      * Added pvalloc, as recommended by H.J. Liu
  1.5029 +      * Added 64bit pointer support mainly from Wolfram Gloger
  1.5030 +      * Added anonymously donated WIN32 sbrk emulation
  1.5031 +      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
  1.5032 +      * malloc_extend_top: fix mask error that caused wastage after
  1.5033 +        foreign sbrks
  1.5034 +      * Add linux mremap support code from HJ Liu
  1.5035 +
  1.5036 +    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
  1.5037 +      * Integrated most documentation with the code.
  1.5038 +      * Add support for mmap, with help from
  1.5039 +        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  1.5040 +      * Use last_remainder in more cases.
  1.5041 +      * Pack bins using idea from colin@nyx10.cs.du.edu
  1.5042 +      * Use ordered bins instead of best-fit threshold
  1.5043 +      * Eliminate block-local decls to simplify tracing and debugging.
  1.5044 +      * Support another case of realloc via move into top
  1.5045 +      * Fix error occurring when initial sbrk_base not word-aligned.
  1.5046 +      * Rely on page size for units instead of SBRK_UNIT to
  1.5047 +        avoid surprises about sbrk alignment conventions.
  1.5048 +      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
  1.5049 +        (raymond@es.ele.tue.nl) for the suggestion.
  1.5050 +      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
  1.5051 +      * More precautions for cases where other routines call sbrk,
  1.5052 +        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
  1.5053 +      * Added macros etc., allowing use in linux libc from
  1.5054 +        H.J. Lu (hjl@gnu.ai.mit.edu)
  1.5055 +      * Inverted this history list
  1.5056 +
  1.5057 +    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
  1.5058 +      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
  1.5059 +      * Removed all preallocation code since under current scheme
  1.5060 +        the work required to undo bad preallocations exceeds
  1.5061 +        the work saved in good cases for most test programs.
  1.5062 +      * No longer use return list or unconsolidated bins since
  1.5063 +        no scheme using them consistently outperforms those that don't
  1.5064 +        given above changes.
  1.5065 +      * Use best fit for very large chunks to prevent some worst-cases.
  1.5066 +      * Added some support for debugging
  1.5067 +
  1.5068 +    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
  1.5069 +      * Removed footers when chunks are in use. Thanks to
  1.5070 +        Paul Wilson (wilson@cs.texas.edu) for the suggestion.
  1.5071 +
  1.5072 +    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
  1.5073 +      * Added malloc_trim, with help from Wolfram Gloger
  1.5074 +        (wmglo@Dent.MED.Uni-Muenchen.DE).
  1.5075 +
  1.5076 +    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
  1.5077 +
  1.5078 +    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
  1.5079 +      * realloc: try to expand in both directions
  1.5080 +      * malloc: swap order of clean-bin strategy;
  1.5081 +      * realloc: only conditionally expand backwards
  1.5082 +      * Try not to scavenge used bins
  1.5083 +      * Use bin counts as a guide to preallocation
  1.5084 +      * Occasionally bin return list chunks in first scan
  1.5085 +      * Add a few optimizations from colin@nyx10.cs.du.edu
  1.5086 +
  1.5087 +    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
  1.5088 +      * faster bin computation & slightly different binning
  1.5089 +      * merged all consolidations to one part of malloc proper
  1.5090 +         (eliminating old malloc_find_space & malloc_clean_bin)
  1.5091 +      * Scan 2 returns chunks (not just 1)
  1.5092 +      * Propagate failure in realloc if malloc returns 0
  1.5093 +      * Add stuff to allow compilation on non-ANSI compilers
  1.5094 +          from kpv@research.att.com
  1.5095 +
  1.5096 +    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
  1.5097 +      * removed potential for odd address access in prev_chunk
  1.5098 +      * removed dependency on getpagesize.h
  1.5099 +      * misc cosmetics and a bit more internal documentation
  1.5100 +      * anticosmetics: mangled names in macros to evade debugger strangeness
  1.5101 +      * tested on sparc, hp-700, dec-mips, rs6000
  1.5102 +          with gcc & native cc (hp, dec only) allowing
  1.5103 +          Detlefs & Zorn comparison study (in SIGPLAN Notices.)
  1.5104 +
  1.5105 +    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
  1.5106 +      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
  1.5107 +         structure of the old version, but most details differ.)
  1.5108 + 
  1.5109 +*/
  1.5110 +
  1.5111 +#endif /* !HAVE_MALLOC */
  1.5112 \ No newline at end of file