src/stdlib/SDL_malloc.c
branch:    SDL-1.3
changeset: 1668:4da1ee79c9af
parent:    1662:782fd950bd46
     1.1 --- a/src/stdlib/SDL_malloc.c	Mon May 29 03:53:21 2006 +0000
     1.2 +++ b/src/stdlib/SDL_malloc.c	Mon May 29 04:04:35 2006 +0000
     1.3 @@ -718,7 +718,7 @@
     1.4    maximum supported value of n differs across systems, but is in all
     1.5    cases less than the maximum representable value of a size_t.
     1.6  */
     1.7 -    void *dlmalloc (size_t);
     1.8 +    void *dlmalloc(size_t);
     1.9  
    1.10  /*
    1.11    free(void* p)
    1.12 @@ -727,14 +727,14 @@
    1.13    It has no effect if p is null. If p was not malloced or already
    1.14    freed, free(p) will by default cause the current program to abort.
    1.15  */
    1.16 -    void dlfree (void *);
    1.17 +    void dlfree(void *);
    1.18  
    1.19  /*
    1.20    calloc(size_t n_elements, size_t element_size);
    1.21    Returns a pointer to n_elements * element_size bytes, with all locations
    1.22    set to zero.
    1.23  */
    1.24 -    void *dlcalloc (size_t, size_t);
    1.25 +    void *dlcalloc(size_t, size_t);
    1.26  
    1.27  /*
    1.28    realloc(void* p, size_t n)
    1.29 @@ -759,7 +759,7 @@
    1.30    to be used as an argument to realloc is not supported.
    1.31  */
    1.32  
    1.33 -    void *dlrealloc (void *, size_t);
    1.34 +    void *dlrealloc(void *, size_t);
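
As a quick illustration of the dl-prefixed entry points above, a minimal
sketch (assuming these names are exported as declared; error paths kept
explicit):

    #include <stddef.h>

    void *dlmalloc(size_t);
    void *dlrealloc(void *, size_t);
    void dlfree(void *);

    static int grow_buffer_demo(void)
    {
        void *p = dlmalloc(64);
        void *q;
        if (p == 0)
            return -1;
        q = dlrealloc(p, 256);      /* on failure, p is still valid */
        if (q != 0)
            p = q;
        dlfree(p);
        return 0;
    }
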
    1.35  
    1.36  /*
    1.37    memalign(size_t alignment, size_t n);
    1.38 @@ -773,14 +773,14 @@
    1.39  
    1.40    Overreliance on memalign is a sure way to fragment space.
    1.41  */
    1.42 -    void *dlmemalign (size_t, size_t);
    1.43 +    void *dlmemalign(size_t, size_t);
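
For instance, a hedged sketch requesting a cache-line-aligned buffer
(64 is an arbitrary power-of-two alignment):

    #include <stddef.h>

    void *dlmemalign(size_t, size_t);
    void dlfree(void *);

    static void aligned_demo(void)
    {
        void *p = dlmemalign(64, 1000);   /* address is a multiple of 64 */
        if (p != 0)
            dlfree(p);
    }
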
    1.44  
    1.45  /*
    1.46    valloc(size_t n);
    1.47    Equivalent to memalign(pagesize, n), where pagesize is the page
    1.48    size of the system. If the pagesize is unknown, 4096 is used.
    1.49  */
    1.50 -    void *dlvalloc (size_t);
    1.51 +    void *dlvalloc(size_t);
    1.52  
    1.53  /*
    1.54    mallopt(int parameter_number, int parameter_value)
    1.55 @@ -800,7 +800,7 @@
    1.56    M_GRANULARITY        -2     page size   any power of 2 >= page size
    1.57    M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
    1.58  */
    1.59 -    int dlmallopt (int, int);
    1.60 +    int dlmallopt(int, int);
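
Using the parameter numbers from the table above, a sketch that raises the
mmap threshold (the symbolic M_* name is assumed to be defined to this
value elsewhere in the file):

    #define M_MMAP_THRESHOLD (-3)   /* per the table above */

    int dlmallopt(int, int);

    static void tune_demo(void)
    {
        /* serve requests up to 1MB from the normal heap, not direct mmap */
        dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024);
    }
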
    1.61  
    1.62  /*
    1.63    malloc_footprint();
    1.64 @@ -811,7 +811,7 @@
    1.65    Even if locks are otherwise defined, this function does not use them,
    1.66    so results might not be up to date.
    1.67  */
    1.68 -    size_t dlmalloc_footprint (void);
    1.69 +    size_t dlmalloc_footprint(void);
    1.70  
    1.71  /*
    1.72    malloc_max_footprint();
    1.73 @@ -824,7 +824,7 @@
    1.74    otherwise defined, this function does not use them, so results might
    1.75    not be up to date.
    1.76  */
    1.77 -    size_t dlmalloc_max_footprint (void);
    1.78 +    size_t dlmalloc_max_footprint(void);
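
A sketch pairing the two footprint queries (the printf format is hedged via
a cast, matching the style of internal_malloc_stats further down):

    #include <stdio.h>
    #include <stddef.h>

    size_t dlmalloc_footprint(void);
    size_t dlmalloc_max_footprint(void);

    static void footprint_demo(void)
    {
        printf("system bytes now  = %lu\n",
               (unsigned long) dlmalloc_footprint());
        printf("system bytes peak = %lu\n",
               (unsigned long) dlmalloc_max_footprint());
    }
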
    1.79  
    1.80  #if !NO_MALLINFO
    1.81  /*
    1.82 @@ -849,7 +849,7 @@
    1.83    be kept as longs, the reported values may wrap around zero and
    1.84    thus be inaccurate.
    1.85  */
    1.86 -    struct mallinfo dlmallinfo (void);
    1.87 +    struct mallinfo dlmallinfo(void);
    1.88  #endif                          /* NO_MALLINFO */
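
A sketch under the assumption that struct mallinfo has the standard
SVID/XPG ten-field layout (which matches the { 0,0,0,0,0,0,0,0,0,0 }
initializer used in internal_mallinfo below); keepcost is the releasable
topmost space:

    #include <stdio.h>
    #include <stddef.h>

    /* assumed layout; dlmalloc's field type is configurable */
    struct mallinfo {
        size_t arena, ordblks, smblks, hblks, hblkhd,
               usmblks, fsmblks, uordblks, fordblks, keepcost;
    };
    struct mallinfo dlmallinfo(void);

    static void mallinfo_demo(void)
    {
        struct mallinfo mi = dlmallinfo();
        printf("releasable top bytes = %lu\n", (unsigned long) mi.keepcost);
    }
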
    1.89  
    1.90  /*
    1.91 @@ -904,7 +904,7 @@
    1.92      return first;
    1.93    }
    1.94  */
    1.95 -    void **dlindependent_calloc (size_t, size_t, void **);
    1.96 +    void **dlindependent_calloc(size_t, size_t, void **);
    1.97  
    1.98  /*
    1.99    independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   1.100 @@ -965,7 +965,7 @@
   1.101    since it cannot reuse existing noncontiguous small chunks that
   1.102    might be available for some of the elements.
   1.103  */
   1.104 -    void **dlindependent_comalloc (size_t, size_t *, void **);
   1.105 +    void **dlindependent_comalloc(size_t, size_t *, void **);
   1.106  
   1.107  
   1.108  /*
   1.109 @@ -973,7 +973,7 @@
   1.110    Equivalent to valloc(minimum-page-that-holds(n)), that is,
   1.111    round up n to nearest pagesize.
   1.112   */
   1.113 -    void *dlpvalloc (size_t);
   1.114 +    void *dlpvalloc(size_t);
   1.115  
   1.116  /*
   1.117    malloc_trim(size_t pad);
   1.118 @@ -996,7 +996,7 @@
   1.119  
   1.120    Malloc_trim returns 1 if it actually released any memory, else 0.
   1.121  */
   1.122 -    int dlmalloc_trim (size_t);
   1.123 +    int dlmalloc_trim(size_t);
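
For example, a sketch asking the allocator to return unused memory to the
system with no extra pad kept back:

    #include <stddef.h>

    int dlmalloc_trim(size_t);

    static void trim_demo(void)
    {
        int released = dlmalloc_trim(0);  /* 1 if memory actually went back */
        (void) released;
    }
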
   1.124  
   1.125  /*
   1.126    malloc_usable_size(void* p);
   1.127 @@ -1012,7 +1012,7 @@
   1.128    p = malloc(n);
   1.129    assert(malloc_usable_size(p) >= 256);
   1.130  */
   1.131 -    size_t dlmalloc_usable_size (void *);
   1.132 +    size_t dlmalloc_usable_size(void *);
   1.133  
   1.134  /*
   1.135    malloc_stats();
   1.136 @@ -1033,7 +1033,7 @@
   1.137    malloc_stats prints only the most commonly interesting statistics.
   1.138    More information can be obtained by calling mallinfo.
   1.139  */
   1.140 -    void dlmalloc_stats (void);
   1.141 +    void dlmalloc_stats(void);
   1.142  
   1.143  #endif                          /* ONLY_MSPACES */
   1.144  
   1.145 @@ -1056,7 +1056,7 @@
   1.146    compiling with a different DEFAULT_GRANULARITY or dynamically
   1.147    setting with mallopt(M_GRANULARITY, value).
   1.148  */
   1.149 -    mspace create_mspace (size_t capacity, int locked);
   1.150 +    mspace create_mspace(size_t capacity, int locked);
   1.151  
   1.152  /*
   1.153    destroy_mspace destroys the given space, and attempts to return all
   1.154 @@ -1064,7 +1064,7 @@
   1.155    bytes freed. After destruction, the results of access to all memory
   1.156    used by the space become undefined.
   1.157  */
   1.158 -    size_t destroy_mspace (mspace msp);
   1.159 +    size_t destroy_mspace(mspace msp);
   1.160  
   1.161  /*
   1.162    create_mspace_with_base uses the memory supplied as the initial base
   1.163 @@ -1075,13 +1075,13 @@
   1.164    Destroying this space will deallocate all additionally allocated
   1.165    space (if possible) but not the initial base.
   1.166  */
   1.167 -    mspace create_mspace_with_base (void *base, size_t capacity, int locked);
   1.168 +    mspace create_mspace_with_base(void *base, size_t capacity, int locked);
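
A sketch carving a space out of caller-owned storage (sizes are arbitrary;
the base must be large enough for the space's bookkeeping overhead, and the
mspace typedef is assumed from the elided text above):

    #include <stddef.h>

    typedef void *mspace;   /* opaque handle, per the elided typedef */
    mspace create_mspace_with_base(void *base, size_t capacity, int locked);
    size_t destroy_mspace(mspace msp);

    static char arena[1 << 20];   /* 1MB region owned by the caller */

    static void arena_demo(void)
    {
        mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
        if (ms != 0)
            destroy_mspace(ms);  /* frees extra segments, never the arena */
    }
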
   1.169  
   1.170  /*
   1.171    mspace_malloc behaves as malloc, but operates within
   1.172    the given space.
   1.173  */
   1.174 -    void *mspace_malloc (mspace msp, size_t bytes);
   1.175 +    void *mspace_malloc(mspace msp, size_t bytes);
   1.176  
   1.177  /*
   1.178    mspace_free behaves as free, but operates within
   1.179 @@ -1091,7 +1091,7 @@
   1.180    free may be called instead of mspace_free because freed chunks from
   1.181    any space are handled by their originating spaces.
   1.182  */
   1.183 -    void mspace_free (mspace msp, void *mem);
   1.184 +    void mspace_free(mspace msp, void *mem);
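
Putting the pieces together, a sketch of the basic mspace lifecycle
(capacity 0 is assumed to request the default initial size, per the elided
part of the create_mspace comment; locked 0 skips the internal lock):

    #include <stddef.h>

    typedef void *mspace;   /* as above */
    mspace create_mspace(size_t capacity, int locked);
    size_t destroy_mspace(mspace msp);
    void *mspace_malloc(mspace msp, size_t bytes);
    void mspace_free(mspace msp, void *mem);

    static void lifecycle_demo(void)
    {
        mspace ms = create_mspace(0, 0);
        if (ms != 0) {
            void *p = mspace_malloc(ms, 128);
            if (p != 0)
                mspace_free(ms, p);
            destroy_mspace(ms);   /* returns the number of bytes released */
        }
    }
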
   1.185  
   1.186  /*
   1.187    mspace_realloc behaves as realloc, but operates within
   1.188 @@ -1102,45 +1102,45 @@
   1.189    realloced chunks from any space are handled by their originating
   1.190    spaces.
   1.191  */
   1.192 -    void *mspace_realloc (mspace msp, void *mem, size_t newsize);
   1.193 +    void *mspace_realloc(mspace msp, void *mem, size_t newsize);
   1.194  
   1.195  /*
   1.196    mspace_calloc behaves as calloc, but operates within
   1.197    the given space.
   1.198  */
   1.199 -    void *mspace_calloc (mspace msp, size_t n_elements, size_t elem_size);
   1.200 +    void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
   1.201  
   1.202  /*
   1.203    mspace_memalign behaves as memalign, but operates within
   1.204    the given space.
   1.205  */
   1.206 -    void *mspace_memalign (mspace msp, size_t alignment, size_t bytes);
   1.207 +    void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);
   1.208  
   1.209  /*
   1.210    mspace_independent_calloc behaves as independent_calloc, but
   1.211    operates within the given space.
   1.212  */
   1.213 -    void **mspace_independent_calloc (mspace msp, size_t n_elements,
   1.214 -                                      size_t elem_size, void *chunks[]);
   1.215 +    void **mspace_independent_calloc(mspace msp, size_t n_elements,
   1.216 +                                     size_t elem_size, void *chunks[]);
   1.217  
   1.218  /*
   1.219    mspace_independent_comalloc behaves as independent_comalloc, but
   1.220    operates within the given space.
   1.221  */
   1.222 -    void **mspace_independent_comalloc (mspace msp, size_t n_elements,
   1.223 -                                        size_t sizes[], void *chunks[]);
   1.224 +    void **mspace_independent_comalloc(mspace msp, size_t n_elements,
   1.225 +                                       size_t sizes[], void *chunks[]);
   1.226  
   1.227  /*
   1.228    mspace_footprint() returns the number of bytes obtained from the
   1.229    system for this space.
   1.230  */
   1.231 -    size_t mspace_footprint (mspace msp);
   1.232 +    size_t mspace_footprint(mspace msp);
   1.233  
   1.234  /*
   1.235    mspace_max_footprint() returns the peak number of bytes obtained from the
   1.236    system for this space.
   1.237  */
   1.238 -    size_t mspace_max_footprint (mspace msp);
   1.239 +    size_t mspace_max_footprint(mspace msp);
   1.240  
   1.241  
   1.242  #if !NO_MALLINFO
   1.243 @@ -1148,25 +1148,25 @@
   1.244    mspace_mallinfo behaves as mallinfo, but reports properties of
   1.245    the given space.
   1.246  */
   1.247 -    struct mallinfo mspace_mallinfo (mspace msp);
   1.248 +    struct mallinfo mspace_mallinfo(mspace msp);
   1.249  #endif                          /* NO_MALLINFO */
   1.250  
   1.251  /*
   1.252    mspace_malloc_stats behaves as malloc_stats, but reports
   1.253    properties of the given space.
   1.254  */
   1.255 -    void mspace_malloc_stats (mspace msp);
   1.256 +    void mspace_malloc_stats(mspace msp);
   1.257  
   1.258  /*
   1.259    mspace_trim behaves as malloc_trim, but
   1.260    operates within the given space.
   1.261  */
   1.262 -    int mspace_trim (mspace msp, size_t pad);
   1.263 +    int mspace_trim(mspace msp, size_t pad);
   1.264  
   1.265  /*
   1.266    An alias for mallopt.
   1.267  */
   1.268 -    int mspace_mallopt (int, int);
   1.269 +    int mspace_mallopt(int, int);
   1.270  
   1.271  #endif                          /* MSPACES */
   1.272  
   1.273 @@ -1233,7 +1233,7 @@
   1.274  #include <unistd.h>             /* for sbrk */
   1.275  #else /* LACKS_UNISTD_H */
   1.276  #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
   1.277 -extern void *sbrk (ptrdiff_t);
   1.278 +extern void *sbrk(ptrdiff_t);
   1.279  #endif /* FreeBSD etc */
   1.280  #endif /* LACKS_UNISTD_H */
   1.281  #endif /* HAVE_MMAP */
   1.282 @@ -1249,7 +1249,7 @@
   1.283  #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
   1.284  #  else
   1.285  #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
   1.286 -extern size_t getpagesize ();
   1.287 +extern size_t getpagesize();
   1.288  #      define malloc_getpagesize getpagesize()
   1.289  #    else
   1.290  #      ifdef WIN32              /* use supplied emulation of getpagesize */
   1.291 @@ -1363,36 +1363,35 @@
   1.292  
   1.293  /* Win32 MMAP via VirtualAlloc */
   1.294  static void *
   1.295 -win32mmap (size_t size)
   1.296 +win32mmap(size_t size)
   1.297  {
   1.298      void *ptr =
   1.299 -        VirtualAlloc (0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   1.300 +        VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   1.301      return (ptr != 0) ? ptr : MFAIL;
   1.302  }
   1.303  
   1.304  /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
   1.305  static void *
   1.306 -win32direct_mmap (size_t size)
   1.307 +win32direct_mmap(size_t size)
   1.308  {
   1.309 -    void *ptr =
   1.310 -        VirtualAlloc (0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
   1.311 -                      PAGE_READWRITE);
   1.312 +    void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
   1.313 +                             PAGE_READWRITE);
   1.314      return (ptr != 0) ? ptr : MFAIL;
   1.315  }
   1.316  
    1.317  /* This function supports releasing coalesced segments */
   1.318  static int
   1.319 -win32munmap (void *ptr, size_t size)
   1.320 +win32munmap(void *ptr, size_t size)
   1.321  {
   1.322      MEMORY_BASIC_INFORMATION minfo;
   1.323      char *cptr = ptr;
   1.324      while (size) {
   1.325 -        if (VirtualQuery (cptr, &minfo, sizeof (minfo)) == 0)
   1.326 +        if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
   1.327              return -1;
   1.328          if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
   1.329              minfo.State != MEM_COMMIT || minfo.RegionSize > size)
   1.330              return -1;
   1.331 -        if (VirtualFree (cptr, 0, MEM_RELEASE) == 0)
   1.332 +        if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
   1.333              return -1;
   1.334          cptr += minfo.RegionSize;
   1.335          size -= minfo.RegionSize;
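
Callers of these wrappers test against MFAIL (the allocator's mmap-failure
sentinel, defined in elided text) rather than NULL; a sketch as it might
appear inside this translation unit:

    static void win32_mmap_demo(void)
    {
        size_t sz = 64 * 1024;      /* already a page/granularity multiple */
        void *p = win32mmap(sz);
        if (p != MFAIL) {           /* MFAIL, not NULL, signals failure */
            /* ... use the region ... */
            win32munmap(p, sz);     /* releases the VirtualAlloc regions */
        }
    }
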
   1.336 @@ -1465,25 +1464,24 @@
   1.337  
   1.338  #define MLOCK_T long
   1.339  static int
   1.340 -win32_acquire_lock (MLOCK_T * sl)
   1.341 +win32_acquire_lock(MLOCK_T * sl)
   1.342  {
   1.343      for (;;) {
   1.344  #ifdef InterlockedCompareExchangePointer
   1.345 -        if (!InterlockedCompareExchange (sl, 1, 0))
   1.346 +        if (!InterlockedCompareExchange(sl, 1, 0))
   1.347              return 0;
   1.348  #else /* Use older void* version */
   1.349 -        if (!InterlockedCompareExchange
   1.350 -            ((void **) sl, (void *) 1, (void *) 0))
   1.351 +        if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
   1.352              return 0;
   1.353  #endif /* InterlockedCompareExchangePointer */
   1.354 -        Sleep (0);
   1.355 +        Sleep(0);
   1.356      }
   1.357  }
   1.358  
   1.359  static void
   1.360 -win32_release_lock (MLOCK_T * sl)
   1.361 +win32_release_lock(MLOCK_T * sl)
   1.362  {
   1.363 -    InterlockedExchange (sl, 0);
   1.364 +    InterlockedExchange(sl, 0);
   1.365  }
   1.366  
   1.367  #define INITIAL_LOCK(l)      *(l)=0
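
A sketch of how these spin-lock primitives compose (demo_lock is a
hypothetical lock variable; in the file itself they sit behind
ACQUIRE_*/RELEASE_* macros such as the MAGIC_INIT ones used later):

    static MLOCK_T demo_lock;   /* zero-initialized: the INITIAL_LOCK state */

    static void locked_demo(void)
    {
        win32_acquire_lock(&demo_lock);  /* spins, yielding via Sleep(0) */
        /* ... critical section ... */
        win32_release_lock(&demo_lock);  /* InterlockedExchange back to 0 */
    }
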
   1.368 @@ -2126,7 +2124,7 @@
   1.369  
   1.370  /* Return segment holding given address */
   1.371  static msegmentptr
   1.372 -segment_holding (mstate m, char *addr)
   1.373 +segment_holding(mstate m, char *addr)
   1.374  {
   1.375      msegmentptr sp = &m->seg;
   1.376      for (;;) {
   1.377 @@ -2139,7 +2137,7 @@
   1.378  
   1.379  /* Return true if segment contains a segment link */
   1.380  static int
   1.381 -has_segment_link (mstate m, msegmentptr ss)
   1.382 +has_segment_link(mstate m, msegmentptr ss)
   1.383  {
   1.384      msegmentptr sp = &m->seg;
   1.385      for (;;) {
   1.386 @@ -2206,7 +2204,7 @@
   1.387  int malloc_corruption_error_count;
   1.388  
   1.389  /* default corruption action */
   1.390 -static void reset_on_error (mstate m);
   1.391 +static void reset_on_error(mstate m);
   1.392  
   1.393  #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
   1.394  #define USAGE_ERROR_ACTION(m, p)
   1.395 @@ -2242,18 +2240,18 @@
   1.396  #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
   1.397  #define check_malloc_state(M)       do_check_malloc_state(M)
   1.398  
   1.399 -static void do_check_any_chunk (mstate m, mchunkptr p);
   1.400 -static void do_check_top_chunk (mstate m, mchunkptr p);
   1.401 -static void do_check_mmapped_chunk (mstate m, mchunkptr p);
   1.402 -static void do_check_inuse_chunk (mstate m, mchunkptr p);
   1.403 -static void do_check_free_chunk (mstate m, mchunkptr p);
   1.404 -static void do_check_malloced_chunk (mstate m, void *mem, size_t s);
   1.405 -static void do_check_tree (mstate m, tchunkptr t);
   1.406 -static void do_check_treebin (mstate m, bindex_t i);
   1.407 -static void do_check_smallbin (mstate m, bindex_t i);
   1.408 -static void do_check_malloc_state (mstate m);
   1.409 -static int bin_find (mstate m, mchunkptr x);
   1.410 -static size_t traverse_and_check (mstate m);
   1.411 +static void do_check_any_chunk(mstate m, mchunkptr p);
   1.412 +static void do_check_top_chunk(mstate m, mchunkptr p);
   1.413 +static void do_check_mmapped_chunk(mstate m, mchunkptr p);
   1.414 +static void do_check_inuse_chunk(mstate m, mchunkptr p);
   1.415 +static void do_check_free_chunk(mstate m, mchunkptr p);
   1.416 +static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
   1.417 +static void do_check_tree(mstate m, tchunkptr t);
   1.418 +static void do_check_treebin(mstate m, bindex_t i);
   1.419 +static void do_check_smallbin(mstate m, bindex_t i);
   1.420 +static void do_check_malloc_state(mstate m);
   1.421 +static int bin_find(mstate m, mchunkptr x);
   1.422 +static size_t traverse_and_check(mstate m);
   1.423  #endif /* DEBUG */
   1.424  
   1.425  /* ---------------------------- Indexing Bins ---------------------------- */
   1.426 @@ -2484,7 +2482,7 @@
   1.427  
   1.428  /* Initialize mparams */
   1.429  static int
   1.430 -init_mparams (void)
   1.431 +init_mparams(void)
   1.432  {
   1.433      if (mparams.page_size == 0) {
   1.434          size_t s;
   1.435 @@ -2502,15 +2500,15 @@
   1.436          {
   1.437  #if USE_DEV_RANDOM
   1.438              int fd;
   1.439 -            unsigned char buf[sizeof (size_t)];
   1.440 +            unsigned char buf[sizeof(size_t)];
   1.441              /* Try to use /dev/urandom, else fall back on using time */
   1.442 -            if ((fd = open ("/dev/urandom", O_RDONLY)) >= 0 &&
   1.443 -                read (fd, buf, sizeof (buf)) == sizeof (buf)) {
   1.444 +            if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
   1.445 +                read(fd, buf, sizeof(buf)) == sizeof(buf)) {
   1.446                  s = *((size_t *) buf);
   1.447 -                close (fd);
   1.448 +                close(fd);
   1.449              } else
   1.450  #endif /* USE_DEV_RANDOM */
   1.451 -                s = (size_t) (time (0) ^ (size_t) 0x55555555U);
   1.452 +                s = (size_t) (time(0) ^ (size_t) 0x55555555U);
   1.453  
   1.454              s |= (size_t) 8U;   /* ensure nonzero */
   1.455              s &= ~(size_t) 7U;  /* improve chances of fault for bad values */
   1.456 @@ -2519,14 +2517,14 @@
   1.457  #else /* (FOOTERS && !INSECURE) */
   1.458          s = (size_t) 0x58585858U;
   1.459  #endif /* (FOOTERS && !INSECURE) */
   1.460 -        ACQUIRE_MAGIC_INIT_LOCK ();
   1.461 +        ACQUIRE_MAGIC_INIT_LOCK();
   1.462          if (mparams.magic == 0) {
   1.463              mparams.magic = s;
   1.464              /* Set up lock for main malloc area */
   1.465 -            INITIAL_LOCK (&gm->mutex);
   1.466 +            INITIAL_LOCK(&gm->mutex);
   1.467              gm->mflags = mparams.default_mflags;
   1.468          }
   1.469 -        RELEASE_MAGIC_INIT_LOCK ();
   1.470 +        RELEASE_MAGIC_INIT_LOCK();
   1.471  
   1.472  #ifndef WIN32
   1.473          mparams.page_size = malloc_getpagesize;
   1.474 @@ -2535,7 +2533,7 @@
   1.475  #else /* WIN32 */
   1.476          {
   1.477              SYSTEM_INFO system_info;
   1.478 -            GetSystemInfo (&system_info);
   1.479 +            GetSystemInfo(&system_info);
   1.480              mparams.page_size = system_info.dwPageSize;
   1.481              mparams.granularity = system_info.dwAllocationGranularity;
   1.482          }
   1.483 @@ -2547,9 +2545,9 @@
   1.484             alignment must be at least 8.
   1.485             Alignment, min chunk size, and page size must all be powers of 2.
   1.486           */
   1.487 -        if ((sizeof (size_t) != sizeof (char *)) ||
   1.488 +        if ((sizeof(size_t) != sizeof(char *)) ||
   1.489              (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
   1.490 -            (sizeof (int) < 4) ||
   1.491 +            (sizeof(int) < 4) ||
   1.492              (MALLOC_ALIGNMENT < (size_t) 8U) ||
   1.493              ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
   1.494              ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
   1.495 @@ -2562,10 +2560,10 @@
   1.496  
   1.497  /* support for mallopt */
   1.498  static int
   1.499 -change_mparam (int param_number, int value)
   1.500 +change_mparam(int param_number, int value)
   1.501  {
   1.502      size_t val = (size_t) value;
   1.503 -    init_mparams ();
   1.504 +    init_mparams();
   1.505      switch (param_number) {
   1.506      case M_TRIM_THRESHOLD:
   1.507          mparams.trim_threshold = val;
   1.508 @@ -2589,199 +2587,199 @@
   1.509  
   1.510  /* Check properties of any chunk, whether free, inuse, mmapped etc  */
   1.511  static void
   1.512 -do_check_any_chunk (mstate m, mchunkptr p)
   1.513 +do_check_any_chunk(mstate m, mchunkptr p)
   1.514  {
   1.515 -    assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
   1.516 -    assert (ok_address (m, p));
   1.517 +    assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   1.518 +    assert(ok_address(m, p));
   1.519  }
   1.520  
   1.521  /* Check properties of top chunk */
   1.522  static void
   1.523 -do_check_top_chunk (mstate m, mchunkptr p)
   1.524 +do_check_top_chunk(mstate m, mchunkptr p)
   1.525  {
   1.526 -    msegmentptr sp = segment_holding (m, (char *) p);
   1.527 -    size_t sz = chunksize (p);
   1.528 -    assert (sp != 0);
   1.529 -    assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
   1.530 -    assert (ok_address (m, p));
   1.531 -    assert (sz == m->topsize);
   1.532 -    assert (sz > 0);
   1.533 -    assert (sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
   1.534 -    assert (pinuse (p));
   1.535 -    assert (!next_pinuse (p));
   1.536 +    msegmentptr sp = segment_holding(m, (char *) p);
   1.537 +    size_t sz = chunksize(p);
   1.538 +    assert(sp != 0);
   1.539 +    assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   1.540 +    assert(ok_address(m, p));
   1.541 +    assert(sz == m->topsize);
   1.542 +    assert(sz > 0);
   1.543 +    assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
   1.544 +    assert(pinuse(p));
   1.545 +    assert(!next_pinuse(p));
   1.546  }
   1.547  
   1.548  /* Check properties of (inuse) mmapped chunks */
   1.549  static void
   1.550 -do_check_mmapped_chunk (mstate m, mchunkptr p)
   1.551 +do_check_mmapped_chunk(mstate m, mchunkptr p)
   1.552  {
   1.553 -    size_t sz = chunksize (p);
   1.554 +    size_t sz = chunksize(p);
   1.555      size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
   1.556 -    assert (is_mmapped (p));
   1.557 -    assert (use_mmap (m));
   1.558 -    assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
   1.559 -    assert (ok_address (m, p));
   1.560 -    assert (!is_small (sz));
   1.561 -    assert ((len & (mparams.page_size - SIZE_T_ONE)) == 0);
   1.562 -    assert (chunk_plus_offset (p, sz)->head == FENCEPOST_HEAD);
   1.563 -    assert (chunk_plus_offset (p, sz + SIZE_T_SIZE)->head == 0);
   1.564 +    assert(is_mmapped(p));
   1.565 +    assert(use_mmap(m));
   1.566 +    assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
   1.567 +    assert(ok_address(m, p));
   1.568 +    assert(!is_small(sz));
   1.569 +    assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
   1.570 +    assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
   1.571 +    assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
   1.572  }
   1.573  
   1.574  /* Check properties of inuse chunks */
   1.575  static void
   1.576 -do_check_inuse_chunk (mstate m, mchunkptr p)
   1.577 +do_check_inuse_chunk(mstate m, mchunkptr p)
   1.578  {
   1.579 -    do_check_any_chunk (m, p);
   1.580 -    assert (cinuse (p));
   1.581 -    assert (next_pinuse (p));
   1.582 +    do_check_any_chunk(m, p);
   1.583 +    assert(cinuse(p));
   1.584 +    assert(next_pinuse(p));
   1.585      /* If not pinuse and not mmapped, previous chunk has OK offset */
   1.586 -    assert (is_mmapped (p) || pinuse (p) || next_chunk (prev_chunk (p)) == p);
   1.587 -    if (is_mmapped (p))
   1.588 -        do_check_mmapped_chunk (m, p);
   1.589 +    assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
   1.590 +    if (is_mmapped(p))
   1.591 +        do_check_mmapped_chunk(m, p);
   1.592  }
   1.593  
   1.594  /* Check properties of free chunks */
   1.595  static void
   1.596 -do_check_free_chunk (mstate m, mchunkptr p)
   1.597 +do_check_free_chunk(mstate m, mchunkptr p)
   1.598  {
   1.599      size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
   1.600 -    mchunkptr next = chunk_plus_offset (p, sz);
   1.601 -    do_check_any_chunk (m, p);
   1.602 -    assert (!cinuse (p));
   1.603 -    assert (!next_pinuse (p));
   1.604 -    assert (!is_mmapped (p));
   1.605 +    mchunkptr next = chunk_plus_offset(p, sz);
   1.606 +    do_check_any_chunk(m, p);
   1.607 +    assert(!cinuse(p));
   1.608 +    assert(!next_pinuse(p));
   1.609 +    assert(!is_mmapped(p));
   1.610      if (p != m->dv && p != m->top) {
   1.611          if (sz >= MIN_CHUNK_SIZE) {
   1.612 -            assert ((sz & CHUNK_ALIGN_MASK) == 0);
   1.613 -            assert (is_aligned (chunk2mem (p)));
   1.614 -            assert (next->prev_foot == sz);
   1.615 -            assert (pinuse (p));
   1.616 -            assert (next == m->top || cinuse (next));
   1.617 -            assert (p->fd->bk == p);
   1.618 -            assert (p->bk->fd == p);
   1.619 +            assert((sz & CHUNK_ALIGN_MASK) == 0);
   1.620 +            assert(is_aligned(chunk2mem(p)));
   1.621 +            assert(next->prev_foot == sz);
   1.622 +            assert(pinuse(p));
   1.623 +            assert(next == m->top || cinuse(next));
   1.624 +            assert(p->fd->bk == p);
   1.625 +            assert(p->bk->fd == p);
   1.626          } else                  /* markers are always of size SIZE_T_SIZE */
   1.627 -            assert (sz == SIZE_T_SIZE);
   1.628 +            assert(sz == SIZE_T_SIZE);
   1.629      }
   1.630  }
   1.631  
   1.632  /* Check properties of malloced chunks at the point they are malloced */
   1.633  static void
   1.634 -do_check_malloced_chunk (mstate m, void *mem, size_t s)
   1.635 +do_check_malloced_chunk(mstate m, void *mem, size_t s)
   1.636  {
   1.637      if (mem != 0) {
   1.638 -        mchunkptr p = mem2chunk (mem);
   1.639 +        mchunkptr p = mem2chunk(mem);
   1.640          size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
   1.641 -        do_check_inuse_chunk (m, p);
   1.642 -        assert ((sz & CHUNK_ALIGN_MASK) == 0);
   1.643 -        assert (sz >= MIN_CHUNK_SIZE);
   1.644 -        assert (sz >= s);
   1.645 +        do_check_inuse_chunk(m, p);
   1.646 +        assert((sz & CHUNK_ALIGN_MASK) == 0);
   1.647 +        assert(sz >= MIN_CHUNK_SIZE);
   1.648 +        assert(sz >= s);
   1.649          /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
   1.650 -        assert (is_mmapped (p) || sz < (s + MIN_CHUNK_SIZE));
   1.651 +        assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
   1.652      }
   1.653  }
   1.654  
   1.655  /* Check a tree and its subtrees.  */
   1.656  static void
   1.657 -do_check_tree (mstate m, tchunkptr t)
   1.658 +do_check_tree(mstate m, tchunkptr t)
   1.659  {
   1.660      tchunkptr head = 0;
   1.661      tchunkptr u = t;
   1.662      bindex_t tindex = t->index;
   1.663 -    size_t tsize = chunksize (t);
   1.664 +    size_t tsize = chunksize(t);
   1.665      bindex_t idx;
   1.666 -    compute_tree_index (tsize, idx);
   1.667 -    assert (tindex == idx);
   1.668 -    assert (tsize >= MIN_LARGE_SIZE);
   1.669 -    assert (tsize >= minsize_for_tree_index (idx));
   1.670 -    assert ((idx == NTREEBINS - 1)
   1.671 -            || (tsize < minsize_for_tree_index ((idx + 1))));
   1.672 +    compute_tree_index(tsize, idx);
   1.673 +    assert(tindex == idx);
   1.674 +    assert(tsize >= MIN_LARGE_SIZE);
   1.675 +    assert(tsize >= minsize_for_tree_index(idx));
   1.676 +    assert((idx == NTREEBINS - 1)
   1.677 +           || (tsize < minsize_for_tree_index((idx + 1))));
   1.678  
   1.679      do {                        /* traverse through chain of same-sized nodes */
   1.680 -        do_check_any_chunk (m, ((mchunkptr) u));
   1.681 -        assert (u->index == tindex);
   1.682 -        assert (chunksize (u) == tsize);
   1.683 -        assert (!cinuse (u));
   1.684 -        assert (!next_pinuse (u));
   1.685 -        assert (u->fd->bk == u);
   1.686 -        assert (u->bk->fd == u);
   1.687 +        do_check_any_chunk(m, ((mchunkptr) u));
   1.688 +        assert(u->index == tindex);
   1.689 +        assert(chunksize(u) == tsize);
   1.690 +        assert(!cinuse(u));
   1.691 +        assert(!next_pinuse(u));
   1.692 +        assert(u->fd->bk == u);
   1.693 +        assert(u->bk->fd == u);
   1.694          if (u->parent == 0) {
   1.695 -            assert (u->child[0] == 0);
   1.696 -            assert (u->child[1] == 0);
   1.697 +            assert(u->child[0] == 0);
   1.698 +            assert(u->child[1] == 0);
   1.699          } else {
   1.700 -            assert (head == 0); /* only one node on chain has parent */
   1.701 +            assert(head == 0);  /* only one node on chain has parent */
   1.702              head = u;
   1.703 -            assert (u->parent != u);
   1.704 -            assert (u->parent->child[0] == u ||
   1.705 -                    u->parent->child[1] == u ||
   1.706 -                    *((tbinptr *) (u->parent)) == u);
   1.707 +            assert(u->parent != u);
   1.708 +            assert(u->parent->child[0] == u ||
   1.709 +                   u->parent->child[1] == u ||
   1.710 +                   *((tbinptr *) (u->parent)) == u);
   1.711              if (u->child[0] != 0) {
   1.712 -                assert (u->child[0]->parent == u);
   1.713 -                assert (u->child[0] != u);
   1.714 -                do_check_tree (m, u->child[0]);
   1.715 +                assert(u->child[0]->parent == u);
   1.716 +                assert(u->child[0] != u);
   1.717 +                do_check_tree(m, u->child[0]);
   1.718              }
   1.719              if (u->child[1] != 0) {
   1.720 -                assert (u->child[1]->parent == u);
   1.721 -                assert (u->child[1] != u);
   1.722 -                do_check_tree (m, u->child[1]);
   1.723 +                assert(u->child[1]->parent == u);
   1.724 +                assert(u->child[1] != u);
   1.725 +                do_check_tree(m, u->child[1]);
   1.726              }
   1.727              if (u->child[0] != 0 && u->child[1] != 0) {
   1.728 -                assert (chunksize (u->child[0]) < chunksize (u->child[1]));
   1.729 +                assert(chunksize(u->child[0]) < chunksize(u->child[1]));
   1.730              }
   1.731          }
   1.732          u = u->fd;
   1.733      }
   1.734      while (u != t);
   1.735 -    assert (head != 0);
   1.736 +    assert(head != 0);
   1.737  }
   1.738  
   1.739  /*  Check all the chunks in a treebin.  */
   1.740  static void
   1.741 -do_check_treebin (mstate m, bindex_t i)
   1.742 +do_check_treebin(mstate m, bindex_t i)
   1.743  {
   1.744 -    tbinptr *tb = treebin_at (m, i);
   1.745 +    tbinptr *tb = treebin_at(m, i);
   1.746      tchunkptr t = *tb;
   1.747      int empty = (m->treemap & (1U << i)) == 0;
   1.748      if (t == 0)
   1.749 -        assert (empty);
   1.750 +        assert(empty);
   1.751      if (!empty)
   1.752 -        do_check_tree (m, t);
   1.753 +        do_check_tree(m, t);
   1.754  }
   1.755  
   1.756  /*  Check all the chunks in a smallbin.  */
   1.757  static void
   1.758 -do_check_smallbin (mstate m, bindex_t i)
   1.759 +do_check_smallbin(mstate m, bindex_t i)
   1.760  {
   1.761 -    sbinptr b = smallbin_at (m, i);
   1.762 +    sbinptr b = smallbin_at(m, i);
   1.763      mchunkptr p = b->bk;
   1.764      unsigned int empty = (m->smallmap & (1U << i)) == 0;
   1.765      if (p == b)
   1.766 -        assert (empty);
   1.767 +        assert(empty);
   1.768      if (!empty) {
   1.769          for (; p != b; p = p->bk) {
   1.770 -            size_t size = chunksize (p);
   1.771 +            size_t size = chunksize(p);
   1.772              mchunkptr q;
   1.773              /* each chunk claims to be free */
   1.774 -            do_check_free_chunk (m, p);
   1.775 +            do_check_free_chunk(m, p);
   1.776              /* chunk belongs in bin */
   1.777 -            assert (small_index (size) == i);
   1.778 -            assert (p->bk == b || chunksize (p->bk) == chunksize (p));
   1.779 +            assert(small_index(size) == i);
   1.780 +            assert(p->bk == b || chunksize(p->bk) == chunksize(p));
   1.781              /* chunk is followed by an inuse chunk */
   1.782 -            q = next_chunk (p);
   1.783 +            q = next_chunk(p);
   1.784              if (q->head != FENCEPOST_HEAD)
   1.785 -                do_check_inuse_chunk (m, q);
   1.786 +                do_check_inuse_chunk(m, q);
   1.787          }
   1.788      }
   1.789  }
   1.790  
   1.791  /* Find x in a bin. Used in other check functions. */
   1.792  static int
   1.793 -bin_find (mstate m, mchunkptr x)
   1.794 +bin_find(mstate m, mchunkptr x)
   1.795  {
   1.796 -    size_t size = chunksize (x);
   1.797 -    if (is_small (size)) {
   1.798 -        bindex_t sidx = small_index (size);
   1.799 -        sbinptr b = smallbin_at (m, sidx);
   1.800 -        if (smallmap_is_marked (m, sidx)) {
   1.801 +    size_t size = chunksize(x);
   1.802 +    if (is_small(size)) {
   1.803 +        bindex_t sidx = small_index(size);
   1.804 +        sbinptr b = smallbin_at(m, sidx);
   1.805 +        if (smallmap_is_marked(m, sidx)) {
   1.806              mchunkptr p = b;
   1.807              do {
   1.808                  if (p == x)
   1.809 @@ -2791,11 +2789,11 @@
   1.810          }
   1.811      } else {
   1.812          bindex_t tidx;
   1.813 -        compute_tree_index (size, tidx);
   1.814 -        if (treemap_is_marked (m, tidx)) {
   1.815 -            tchunkptr t = *treebin_at (m, tidx);
   1.816 -            size_t sizebits = size << leftshift_for_tree_index (tidx);
   1.817 -            while (t != 0 && chunksize (t) != size) {
   1.818 +        compute_tree_index(size, tidx);
   1.819 +        if (treemap_is_marked(m, tidx)) {
   1.820 +            tchunkptr t = *treebin_at(m, tidx);
   1.821 +            size_t sizebits = size << leftshift_for_tree_index(tidx);
   1.822 +            while (t != 0 && chunksize(t) != size) {
   1.823                  t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
   1.824                  sizebits <<= 1;
   1.825              }
   1.826 @@ -2814,29 +2812,29 @@
   1.827  
   1.828  /* Traverse each chunk and check it; return total */
   1.829  static size_t
   1.830 -traverse_and_check (mstate m)
   1.831 +traverse_and_check(mstate m)
   1.832  {
   1.833      size_t sum = 0;
   1.834 -    if (is_initialized (m)) {
   1.835 +    if (is_initialized(m)) {
   1.836          msegmentptr s = &m->seg;
   1.837          sum += m->topsize + TOP_FOOT_SIZE;
   1.838          while (s != 0) {
   1.839 -            mchunkptr q = align_as_chunk (s->base);
   1.840 +            mchunkptr q = align_as_chunk(s->base);
   1.841              mchunkptr lastq = 0;
   1.842 -            assert (pinuse (q));
   1.843 -            while (segment_holds (s, q) &&
   1.844 +            assert(pinuse(q));
   1.845 +            while (segment_holds(s, q) &&
   1.846                     q != m->top && q->head != FENCEPOST_HEAD) {
   1.847 -                sum += chunksize (q);
   1.848 -                if (cinuse (q)) {
   1.849 -                    assert (!bin_find (m, q));
   1.850 -                    do_check_inuse_chunk (m, q);
   1.851 +                sum += chunksize(q);
   1.852 +                if (cinuse(q)) {
   1.853 +                    assert(!bin_find(m, q));
   1.854 +                    do_check_inuse_chunk(m, q);
   1.855                  } else {
   1.856 -                    assert (q == m->dv || bin_find (m, q));
   1.857 -                    assert (lastq == 0 || cinuse (lastq));      /* Not 2 consecutive free */
   1.858 -                    do_check_free_chunk (m, q);
   1.859 +                    assert(q == m->dv || bin_find(m, q));
   1.860 +                    assert(lastq == 0 || cinuse(lastq));        /* Not 2 consecutive free */
   1.861 +                    do_check_free_chunk(m, q);
   1.862                  }
   1.863                  lastq = q;
   1.864 -                q = next_chunk (q);
   1.865 +                q = next_chunk(q);
   1.866              }
   1.867              s = s->next;
   1.868          }
   1.869 @@ -2846,33 +2844,33 @@
   1.870  
   1.871  /* Check all properties of malloc_state. */
   1.872  static void
   1.873 -do_check_malloc_state (mstate m)
   1.874 +do_check_malloc_state(mstate m)
   1.875  {
   1.876      bindex_t i;
   1.877      size_t total;
   1.878      /* check bins */
   1.879      for (i = 0; i < NSMALLBINS; ++i)
   1.880 -        do_check_smallbin (m, i);
   1.881 +        do_check_smallbin(m, i);
   1.882      for (i = 0; i < NTREEBINS; ++i)
   1.883 -        do_check_treebin (m, i);
   1.884 +        do_check_treebin(m, i);
   1.885  
   1.886      if (m->dvsize != 0) {       /* check dv chunk */
   1.887 -        do_check_any_chunk (m, m->dv);
   1.888 -        assert (m->dvsize == chunksize (m->dv));
   1.889 -        assert (m->dvsize >= MIN_CHUNK_SIZE);
   1.890 -        assert (bin_find (m, m->dv) == 0);
   1.891 +        do_check_any_chunk(m, m->dv);
   1.892 +        assert(m->dvsize == chunksize(m->dv));
   1.893 +        assert(m->dvsize >= MIN_CHUNK_SIZE);
   1.894 +        assert(bin_find(m, m->dv) == 0);
   1.895      }
   1.896  
   1.897      if (m->top != 0) {          /* check top chunk */
   1.898 -        do_check_top_chunk (m, m->top);
   1.899 -        assert (m->topsize == chunksize (m->top));
   1.900 -        assert (m->topsize > 0);
   1.901 -        assert (bin_find (m, m->top) == 0);
   1.902 +        do_check_top_chunk(m, m->top);
   1.903 +        assert(m->topsize == chunksize(m->top));
   1.904 +        assert(m->topsize > 0);
   1.905 +        assert(bin_find(m, m->top) == 0);
   1.906      }
   1.907  
   1.908 -    total = traverse_and_check (m);
   1.909 -    assert (total <= m->footprint);
   1.910 -    assert (m->footprint <= m->max_footprint);
   1.911 +    total = traverse_and_check(m);
   1.912 +    assert(total <= m->footprint);
   1.913 +    assert(m->footprint <= m->max_footprint);
   1.914  }
   1.915  #endif /* DEBUG */
   1.916  
   1.917 @@ -2880,27 +2878,27 @@
   1.918  
   1.919  #if !NO_MALLINFO
   1.920  static struct mallinfo
   1.921 -internal_mallinfo (mstate m)
   1.922 +internal_mallinfo(mstate m)
   1.923  {
   1.924      struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   1.925 -    if (!PREACTION (m)) {
   1.926 -        check_malloc_state (m);
   1.927 -        if (is_initialized (m)) {
   1.928 +    if (!PREACTION(m)) {
   1.929 +        check_malloc_state(m);
   1.930 +        if (is_initialized(m)) {
   1.931              size_t nfree = SIZE_T_ONE;  /* top always free */
   1.932              size_t mfree = m->topsize + TOP_FOOT_SIZE;
   1.933              size_t sum = mfree;
   1.934              msegmentptr s = &m->seg;
   1.935              while (s != 0) {
   1.936 -                mchunkptr q = align_as_chunk (s->base);
   1.937 -                while (segment_holds (s, q) &&
   1.938 +                mchunkptr q = align_as_chunk(s->base);
   1.939 +                while (segment_holds(s, q) &&
   1.940                         q != m->top && q->head != FENCEPOST_HEAD) {
   1.941 -                    size_t sz = chunksize (q);
   1.942 +                    size_t sz = chunksize(q);
   1.943                      sum += sz;
   1.944 -                    if (!cinuse (q)) {
   1.945 +                    if (!cinuse(q)) {
   1.946                          mfree += sz;
   1.947                          ++nfree;
   1.948                      }
   1.949 -                    q = next_chunk (q);
   1.950 +                    q = next_chunk(q);
   1.951                  }
   1.952                  s = s->next;
   1.953              }
   1.954 @@ -2914,46 +2912,45 @@
   1.955              nm.keepcost = m->topsize;
   1.956          }
   1.957  
   1.958 -        POSTACTION (m);
   1.959 +        POSTACTION(m);
   1.960      }
   1.961      return nm;
   1.962  }
   1.963  #endif /* !NO_MALLINFO */
   1.964  
   1.965  static void
   1.966 -internal_malloc_stats (mstate m)
   1.967 +internal_malloc_stats(mstate m)
   1.968  {
   1.969 -    if (!PREACTION (m)) {
   1.970 +    if (!PREACTION(m)) {
   1.971          size_t maxfp = 0;
   1.972          size_t fp = 0;
   1.973          size_t used = 0;
   1.974 -        check_malloc_state (m);
   1.975 -        if (is_initialized (m)) {
   1.976 +        check_malloc_state(m);
   1.977 +        if (is_initialized(m)) {
   1.978              msegmentptr s = &m->seg;
   1.979              maxfp = m->max_footprint;
   1.980              fp = m->footprint;
   1.981              used = fp - (m->topsize + TOP_FOOT_SIZE);
   1.982  
   1.983              while (s != 0) {
   1.984 -                mchunkptr q = align_as_chunk (s->base);
   1.985 -                while (segment_holds (s, q) &&
   1.986 +                mchunkptr q = align_as_chunk(s->base);
   1.987 +                while (segment_holds(s, q) &&
   1.988                         q != m->top && q->head != FENCEPOST_HEAD) {
   1.989 -                    if (!cinuse (q))
   1.990 -                        used -= chunksize (q);
   1.991 -                    q = next_chunk (q);
   1.992 +                    if (!cinuse(q))
   1.993 +                        used -= chunksize(q);
   1.994 +                    q = next_chunk(q);
   1.995                  }
   1.996                  s = s->next;
   1.997              }
   1.998          }
   1.999  #ifndef LACKS_STDIO_H
  1.1000 -        fprintf (stderr, "max system bytes = %10lu\n",
  1.1001 -                 (unsigned long) (maxfp));
  1.1002 -        fprintf (stderr, "system bytes     = %10lu\n", (unsigned long) (fp));
  1.1003 -        fprintf (stderr, "in use bytes     = %10lu\n",
  1.1004 -                 (unsigned long) (used));
  1.1005 +        fprintf(stderr, "max system bytes = %10lu\n",
  1.1006 +                (unsigned long) (maxfp));
  1.1007 +        fprintf(stderr, "system bytes     = %10lu\n", (unsigned long) (fp));
  1.1008 +        fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long) (used));
  1.1009  #endif
  1.1010  
  1.1011 -        POSTACTION (m);
  1.1012 +        POSTACTION(m);
  1.1013      }
  1.1014  }
  1.1015  
  1.1016 @@ -3219,29 +3216,29 @@
  1.1017  
  1.1018  /* Malloc using mmap */
  1.1019  static void *
  1.1020 -mmap_alloc (mstate m, size_t nb)
  1.1021 +mmap_alloc(mstate m, size_t nb)
  1.1022  {
  1.1023      size_t mmsize =
  1.1024 -        granularity_align (nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  1.1025 +        granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  1.1026      if (mmsize > nb) {          /* Check for wrap around 0 */
  1.1027 -        char *mm = (char *) (DIRECT_MMAP (mmsize));
  1.1028 +        char *mm = (char *) (DIRECT_MMAP(mmsize));
  1.1029          if (mm != CMFAIL) {
  1.1030 -            size_t offset = align_offset (chunk2mem (mm));
  1.1031 +            size_t offset = align_offset(chunk2mem(mm));
  1.1032              size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  1.1033              mchunkptr p = (mchunkptr) (mm + offset);
  1.1034              p->prev_foot = offset | IS_MMAPPED_BIT;
  1.1035              (p)->head = (psize | CINUSE_BIT);
  1.1036 -            mark_inuse_foot (m, p, psize);
  1.1037 -            chunk_plus_offset (p, psize)->head = FENCEPOST_HEAD;
  1.1038 -            chunk_plus_offset (p, psize + SIZE_T_SIZE)->head = 0;
  1.1039 +            mark_inuse_foot(m, p, psize);
  1.1040 +            chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  1.1041 +            chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
  1.1042  
  1.1043              if (mm < m->least_addr)
  1.1044                  m->least_addr = mm;
  1.1045              if ((m->footprint += mmsize) > m->max_footprint)
  1.1046                  m->max_footprint = m->footprint;
  1.1047 -            assert (is_aligned (chunk2mem (p)));
  1.1048 -            check_mmapped_chunk (m, p);
  1.1049 -            return chunk2mem (p);
  1.1050 +            assert(is_aligned(chunk2mem(p)));
  1.1051 +            check_mmapped_chunk(m, p);
  1.1052 +            return chunk2mem(p);
  1.1053          }
  1.1054      }
  1.1055      return 0;
  1.1056 @@ -3249,10 +3246,10 @@
  1.1057  
  1.1058  /* Realloc using mmap */
  1.1059  static mchunkptr
  1.1060 -mmap_resize (mstate m, mchunkptr oldp, size_t nb)
  1.1061 +mmap_resize(mstate m, mchunkptr oldp, size_t nb)
  1.1062  {
  1.1063 -    size_t oldsize = chunksize (oldp);
  1.1064 -    if (is_small (nb))          /* Can't shrink mmap regions below small size */
  1.1065 +    size_t oldsize = chunksize(oldp);
  1.1066 +    if (is_small(nb))           /* Can't shrink mmap regions below small size */
  1.1067          return 0;
  1.1068      /* Keep old chunk if big enough but not too big */
  1.1069      if (oldsize >= nb + SIZE_T_SIZE &&
  1.1070 @@ -3261,23 +3258,23 @@
  1.1071      else {
  1.1072          size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
  1.1073          size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
  1.1074 -        size_t newmmsize = granularity_align (nb + SIX_SIZE_T_SIZES +
  1.1075 -                                              CHUNK_ALIGN_MASK);
  1.1076 -        char *cp = (char *) CALL_MREMAP ((char *) oldp - offset,
  1.1077 -                                         oldmmsize, newmmsize, 1);
  1.1078 +        size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
  1.1079 +                                             CHUNK_ALIGN_MASK);
  1.1080 +        char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
  1.1081 +                                        oldmmsize, newmmsize, 1);
  1.1082          if (cp != CMFAIL) {
  1.1083              mchunkptr newp = (mchunkptr) (cp + offset);
  1.1084              size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
  1.1085              newp->head = (psize | CINUSE_BIT);
  1.1086 -            mark_inuse_foot (m, newp, psize);
  1.1087 -            chunk_plus_offset (newp, psize)->head = FENCEPOST_HEAD;
  1.1088 -            chunk_plus_offset (newp, psize + SIZE_T_SIZE)->head = 0;
  1.1089 +            mark_inuse_foot(m, newp, psize);
  1.1090 +            chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
  1.1091 +            chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
  1.1092  
  1.1093              if (cp < m->least_addr)
  1.1094                  m->least_addr = cp;
  1.1095              if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
  1.1096                  m->max_footprint = m->footprint;
  1.1097 -            check_mmapped_chunk (m, newp);
  1.1098 +            check_mmapped_chunk(m, newp);
  1.1099              return newp;
  1.1100          }
  1.1101      }
  1.1102 @@ -3288,10 +3285,10 @@
  1.1103  
  1.1104  /* Initialize top chunk and its size */
  1.1105  static void
  1.1106 -init_top (mstate m, mchunkptr p, size_t psize)
  1.1107 +init_top(mstate m, mchunkptr p, size_t psize)
  1.1108  {
  1.1109      /* Ensure alignment */
  1.1110 -    size_t offset = align_offset (chunk2mem (p));
  1.1111 +    size_t offset = align_offset(chunk2mem(p));
  1.1112      p = (mchunkptr) ((char *) p + offset);
  1.1113      psize -= offset;
  1.1114  
  1.1115 @@ -3299,18 +3296,18 @@
  1.1116      m->topsize = psize;
  1.1117      p->head = psize | PINUSE_BIT;
  1.1118      /* set size of fake trailing chunk holding overhead space only once */
  1.1119 -    chunk_plus_offset (p, psize)->head = TOP_FOOT_SIZE;
  1.1120 +    chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  1.1121      m->trim_check = mparams.trim_threshold;     /* reset on each update */
  1.1122  }
  1.1123  
  1.1124  /* Initialize bins for a new mstate that is otherwise zeroed out */
  1.1125  static void
  1.1126 -init_bins (mstate m)
  1.1127 +init_bins(mstate m)
  1.1128  {
  1.1129      /* Establish circular links for smallbins */
  1.1130      bindex_t i;
  1.1131      for (i = 0; i < NSMALLBINS; ++i) {
  1.1132 -        sbinptr bin = smallbin_at (m, i);
  1.1133 +        sbinptr bin = smallbin_at(m, i);
  1.1134          bin->fd = bin->bk = bin;
  1.1135      }
  1.1136  }
  1.1137 @@ -3319,7 +3316,7 @@
  1.1138  
  1.1139  /* default corruption action */
  1.1140  static void
  1.1141 -reset_on_error (mstate m)
  1.1142 +reset_on_error(mstate m)
  1.1143  {
  1.1144      int i;
  1.1145      ++malloc_corruption_error_count;
  1.1146 @@ -3331,78 +3328,78 @@
  1.1147      m->seg.next = 0;
  1.1148      m->top = m->dv = 0;
  1.1149      for (i = 0; i < NTREEBINS; ++i)
  1.1150 -        *treebin_at (m, i) = 0;
  1.1151 -    init_bins (m);
  1.1152 +        *treebin_at(m, i) = 0;
  1.1153 +    init_bins(m);
  1.1154  }
  1.1155  #endif /* PROCEED_ON_ERROR */
  1.1156  
  1.1157  /* Allocate chunk and prepend remainder with chunk in successor base. */
  1.1158  static void *
  1.1159 -prepend_alloc (mstate m, char *newbase, char *oldbase, size_t nb)
  1.1160 +prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
  1.1161  {
  1.1162 -    mchunkptr p = align_as_chunk (newbase);
  1.1163 -    mchunkptr oldfirst = align_as_chunk (oldbase);
  1.1164 +    mchunkptr p = align_as_chunk(newbase);
  1.1165 +    mchunkptr oldfirst = align_as_chunk(oldbase);
  1.1166      size_t psize = (char *) oldfirst - (char *) p;
  1.1167 -    mchunkptr q = chunk_plus_offset (p, nb);
  1.1168 +    mchunkptr q = chunk_plus_offset(p, nb);
  1.1169      size_t qsize = psize - nb;
  1.1170 -    set_size_and_pinuse_of_inuse_chunk (m, p, nb);
  1.1171 -
  1.1172 -    assert ((char *) oldfirst > (char *) q);
  1.1173 -    assert (pinuse (oldfirst));
  1.1174 -    assert (qsize >= MIN_CHUNK_SIZE);
  1.1175 +    set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  1.1176 +
  1.1177 +    assert((char *) oldfirst > (char *) q);
  1.1178 +    assert(pinuse(oldfirst));
  1.1179 +    assert(qsize >= MIN_CHUNK_SIZE);
  1.1180  
  1.1181      /* consolidate remainder with first chunk of old base */
  1.1182      if (oldfirst == m->top) {
  1.1183          size_t tsize = m->topsize += qsize;
  1.1184          m->top = q;
  1.1185          q->head = tsize | PINUSE_BIT;
  1.1186 -        check_top_chunk (m, q);
  1.1187 +        check_top_chunk(m, q);
  1.1188      } else if (oldfirst == m->dv) {
  1.1189          size_t dsize = m->dvsize += qsize;
  1.1190          m->dv = q;
  1.1191 -        set_size_and_pinuse_of_free_chunk (q, dsize);
  1.1192 +        set_size_and_pinuse_of_free_chunk(q, dsize);
  1.1193      } else {
  1.1194 -        if (!cinuse (oldfirst)) {
  1.1195 -            size_t nsize = chunksize (oldfirst);
  1.1196 -            unlink_chunk (m, oldfirst, nsize);
  1.1197 -            oldfirst = chunk_plus_offset (oldfirst, nsize);
  1.1198 +        if (!cinuse(oldfirst)) {
  1.1199 +            size_t nsize = chunksize(oldfirst);
  1.1200 +            unlink_chunk(m, oldfirst, nsize);
  1.1201 +            oldfirst = chunk_plus_offset(oldfirst, nsize);
  1.1202              qsize += nsize;
  1.1203          }
  1.1204 -        set_free_with_pinuse (q, qsize, oldfirst);
  1.1205 -        insert_chunk (m, q, qsize);
  1.1206 -        check_free_chunk (m, q);
  1.1207 +        set_free_with_pinuse(q, qsize, oldfirst);
  1.1208 +        insert_chunk(m, q, qsize);
  1.1209 +        check_free_chunk(m, q);
  1.1210      }
  1.1211  
  1.1212 -    check_malloced_chunk (m, chunk2mem (p), nb);
  1.1213 -    return chunk2mem (p);
  1.1214 +    check_malloced_chunk(m, chunk2mem(p), nb);
  1.1215 +    return chunk2mem(p);
  1.1216  }
  1.1217  
  1.1218  
  1.1219  /* Add a segment to hold a new noncontiguous region */
  1.1220  static void
  1.1221 -add_segment (mstate m, char *tbase, size_t tsize, flag_t mmapped)
  1.1222 +add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
  1.1223  {
  1.1224      /* Determine locations and sizes of segment, fenceposts, old top */
  1.1225      char *old_top = (char *) m->top;
  1.1226 -    msegmentptr oldsp = segment_holding (m, old_top);
  1.1227 +    msegmentptr oldsp = segment_holding(m, old_top);
  1.1228      char *old_end = oldsp->base + oldsp->size;
  1.1229 -    size_t ssize = pad_request (sizeof (struct malloc_segment));
  1.1230 +    size_t ssize = pad_request(sizeof(struct malloc_segment));
  1.1231      char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  1.1232 -    size_t offset = align_offset (chunk2mem (rawsp));
  1.1233 +    size_t offset = align_offset(chunk2mem(rawsp));
  1.1234      char *asp = rawsp + offset;
  1.1235      char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
  1.1236      mchunkptr sp = (mchunkptr) csp;
  1.1237 -    msegmentptr ss = (msegmentptr) (chunk2mem (sp));
  1.1238 -    mchunkptr tnext = chunk_plus_offset (sp, ssize);
  1.1239 +    msegmentptr ss = (msegmentptr) (chunk2mem(sp));
  1.1240 +    mchunkptr tnext = chunk_plus_offset(sp, ssize);
  1.1241      mchunkptr p = tnext;
  1.1242      int nfences = 0;
  1.1243  
  1.1244      /* reset top to new space */
  1.1245 -    init_top (m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
  1.1246 +    init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
  1.1247  
  1.1248      /* Set up segment record */
  1.1249 -    assert (is_aligned (ss));
  1.1250 -    set_size_and_pinuse_of_inuse_chunk (m, sp, ssize);
  1.1251 +    assert(is_aligned(ss));
  1.1252 +    set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  1.1253      *ss = m->seg;               /* Push current record */
  1.1254      m->seg.base = tbase;
  1.1255      m->seg.size = tsize;
  1.1256 @@ -3411,7 +3408,7 @@
  1.1257  
  1.1258      /* Insert trailing fenceposts */
  1.1259      for (;;) {
  1.1260 -        mchunkptr nextp = chunk_plus_offset (p, SIZE_T_SIZE);
  1.1261 +        mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  1.1262          p->head = FENCEPOST_HEAD;
  1.1263          ++nfences;
  1.1264          if ((char *) (&(nextp->head)) < old_end)
  1.1265 @@ -3419,35 +3416,35 @@
  1.1266          else
  1.1267              break;
  1.1268      }
  1.1269 -    assert (nfences >= 2);
  1.1270 +    assert(nfences >= 2);
  1.1271  
  1.1272      /* Insert the rest of old top into a bin as an ordinary free chunk */
  1.1273      if (csp != old_top) {
  1.1274          mchunkptr q = (mchunkptr) old_top;
  1.1275          size_t psize = csp - old_top;
  1.1276 -        mchunkptr tn = chunk_plus_offset (q, psize);
  1.1277 -        set_free_with_pinuse (q, psize, tn);
  1.1278 -        insert_chunk (m, q, psize);
  1.1279 +        mchunkptr tn = chunk_plus_offset(q, psize);
  1.1280 +        set_free_with_pinuse(q, psize, tn);
  1.1281 +        insert_chunk(m, q, psize);
  1.1282      }
  1.1283  
  1.1284 -    check_top_chunk (m, m->top);
  1.1285 +    check_top_chunk(m, m->top);
  1.1286  }
  1.1287  
  1.1288  /* -------------------------- System allocation -------------------------- */
  1.1289  
  1.1290  /* Get memory from system using MORECORE or MMAP */
  1.1291  static void *
  1.1292 -sys_alloc (mstate m, size_t nb)
  1.1293 +sys_alloc(mstate m, size_t nb)
  1.1294  {
  1.1295      char *tbase = CMFAIL;
  1.1296      size_t tsize = 0;
  1.1297      flag_t mmap_flag = 0;
  1.1298  
  1.1299 -    init_mparams ();
  1.1300 +    init_mparams();
  1.1301  
  1.1302      /* Directly map large chunks */
  1.1303 -    if (use_mmap (m) && nb >= mparams.mmap_threshold) {
  1.1304 -        void *mem = mmap_alloc (m, nb);
  1.1305 +    if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  1.1306 +        void *mem = mmap_alloc(m, nb);
  1.1307          if (mem != 0)
  1.1308              return mem;
  1.1309      }
  1.1310 @@ -3469,23 +3466,23 @@
  1.1311         (disabled if not HAVE_MORECORE)
  1.1312       */
  1.1313  
  1.1314 -    if (MORECORE_CONTIGUOUS && !use_noncontiguous (m)) {
  1.1315 +    if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  1.1316          char *br = CMFAIL;
  1.1317          msegmentptr ss =
  1.1318 -            (m->top == 0) ? 0 : segment_holding (m, (char *) m->top);
  1.1319 +            (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
  1.1320          size_t asize = 0;
  1.1321 -        ACQUIRE_MORECORE_LOCK ();
  1.1322 +        ACQUIRE_MORECORE_LOCK();
  1.1323  
  1.1324          if (ss == 0) {          /* First time through or recovery */
  1.1325 -            char *base = (char *) CALL_MORECORE (0);
  1.1326 +            char *base = (char *) CALL_MORECORE(0);
  1.1327              if (base != CMFAIL) {
  1.1328 -                asize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  1.1329 +                asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  1.1330                  /* Adjust to end on a page boundary */
  1.1331 -                if (!is_page_aligned (base))
  1.1332 -                    asize += (page_align ((size_t) base) - (size_t) base);
  1.1333 +                if (!is_page_aligned(base))
  1.1334 +                    asize += (page_align((size_t) base) - (size_t) base);
  1.1335                  /* Can't call MORECORE if size is negative when treated as signed */
  1.1336                  if (asize < HALF_MAX_SIZE_T &&
  1.1337 -                    (br = (char *) (CALL_MORECORE (asize))) == base) {
  1.1338 +                    (br = (char *) (CALL_MORECORE(asize))) == base) {
  1.1339                      tbase = base;
  1.1340                      tsize = asize;
  1.1341                  }
  1.1342 @@ -3493,12 +3490,12 @@
  1.1343          } else {
  1.1344              /* Subtract out existing available top space from MORECORE request. */
  1.1345              asize =
  1.1346 -                granularity_align (nb - m->topsize + TOP_FOOT_SIZE +
  1.1347 -                                   SIZE_T_ONE);
  1.1348 +                granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
  1.1349 +                                  SIZE_T_ONE);
  1.1350              /* Use mem here only if it did continuously extend old space */
  1.1351              if (asize < HALF_MAX_SIZE_T &&
  1.1352                  (br =
  1.1353 -                 (char *) (CALL_MORECORE (asize))) == ss->base + ss->size) {
  1.1354 +                 (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
  1.1355                  tbase = br;
  1.1356                  tsize = asize;
  1.1357              }
  1.1358 @@ -3509,14 +3506,14 @@
  1.1359                  if (asize < HALF_MAX_SIZE_T &&
  1.1360                      asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  1.1361                      size_t esize =
  1.1362 -                        granularity_align (nb + TOP_FOOT_SIZE +
  1.1363 -                                           SIZE_T_ONE - asize);
  1.1364 +                        granularity_align(nb + TOP_FOOT_SIZE +
  1.1365 +                                          SIZE_T_ONE - asize);
  1.1366                      if (esize < HALF_MAX_SIZE_T) {
  1.1367 -                        char *end = (char *) CALL_MORECORE (esize);
  1.1368 +                        char *end = (char *) CALL_MORECORE(esize);
  1.1369                          if (end != CMFAIL)
  1.1370                              asize += esize;
  1.1371                          else {  /* Can't use; try to release */
  1.1372 -                            end = (char *) CALL_MORECORE (-asize);
  1.1373 +                            end = (char *) CALL_MORECORE(-asize);
  1.1374                              br = CMFAIL;
  1.1375                          }
  1.1376                      }
  1.1377 @@ -3526,17 +3523,17 @@
  1.1378                  tbase = br;
  1.1379                  tsize = asize;
  1.1380              } else
  1.1381 -                disable_contiguous (m); /* Don't try contiguous path in the future */
  1.1382 +                disable_contiguous(m);  /* Don't try contiguous path in the future */
  1.1383          }
  1.1384  
  1.1385 -        RELEASE_MORECORE_LOCK ();
  1.1386 +        RELEASE_MORECORE_LOCK();
  1.1387      }
  1.1388  
  1.1389      if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
  1.1390          size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
  1.1391 -        size_t rsize = granularity_align (req);
  1.1392 +        size_t rsize = granularity_align(req);
  1.1393          if (rsize > nb) {       /* Fail if wraps around zero */
  1.1394 -            char *mp = (char *) (CALL_MMAP (rsize));
  1.1395 +            char *mp = (char *) (CALL_MMAP(rsize));
  1.1396              if (mp != CMFAIL) {
  1.1397                  tbase = mp;
  1.1398                  tsize = rsize;
  1.1399 @@ -3546,14 +3543,14 @@
  1.1400      }
  1.1401  
  1.1402      if (HAVE_MORECORE && tbase == CMFAIL) {     /* Try noncontiguous MORECORE */
  1.1403 -        size_t asize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  1.1404 +        size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  1.1405          if (asize < HALF_MAX_SIZE_T) {
  1.1406              char *br = CMFAIL;
  1.1407              char *end = CMFAIL;
  1.1408 -            ACQUIRE_MORECORE_LOCK ();
  1.1409 -            br = (char *) (CALL_MORECORE (asize));
  1.1410 -            end = (char *) (CALL_MORECORE (0));
  1.1411 -            RELEASE_MORECORE_LOCK ();
  1.1412 +            ACQUIRE_MORECORE_LOCK();
  1.1413 +            br = (char *) (CALL_MORECORE(asize));
  1.1414 +            end = (char *) (CALL_MORECORE(0));
  1.1415 +            RELEASE_MORECORE_LOCK();
  1.1416              if (br != CMFAIL && end != CMFAIL && br < end) {
  1.1417                  size_t ssize = end - br;
  1.1418                  if (ssize > nb + TOP_FOOT_SIZE) {
  1.1419 @@ -3569,20 +3566,20 @@
  1.1420          if ((m->footprint += tsize) > m->max_footprint)
  1.1421              m->max_footprint = m->footprint;
  1.1422  
  1.1423 -        if (!is_initialized (m)) {      /* first-time initialization */
  1.1424 +        if (!is_initialized(m)) {       /* first-time initialization */
  1.1425              m->seg.base = m->least_addr = tbase;
  1.1426              m->seg.size = tsize;
  1.1427              m->seg.sflags = mmap_flag;
  1.1428              m->magic = mparams.magic;
  1.1429 -            init_bins (m);
  1.1430 -            if (is_global (m))
  1.1431 -                init_top (m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
  1.1432 +            init_bins(m);
  1.1433 +            if (is_global(m))
  1.1434 +                init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
  1.1435              else {
  1.1436                  /* Offset top by embedded malloc_state */
  1.1437 -                mchunkptr mn = next_chunk (mem2chunk (m));
  1.1438 -                init_top (m, mn,
  1.1439 -                          (size_t) ((tbase + tsize) - (char *) mn) -
  1.1440 -                          TOP_FOOT_SIZE);
  1.1441 +                mchunkptr mn = next_chunk(mem2chunk(m));
  1.1442 +                init_top(m, mn,
  1.1443 +                         (size_t) ((tbase + tsize) - (char *) mn) -
  1.1444 +                         TOP_FOOT_SIZE);
  1.1445              }
  1.1446          }
  1.1447  
  1.1448 @@ -3591,9 +3588,9 @@
  1.1449              msegmentptr sp = &m->seg;
  1.1450              while (sp != 0 && tbase != sp->base + sp->size)
  1.1451                  sp = sp->next;
  1.1452 -            if (sp != 0 && !is_extern_segment (sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds (sp, m->top)) {       /* append */
  1.1453 +            if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
  1.1454                  sp->size += tsize;
  1.1455 -                init_top (m, m->top, m->topsize + tsize);
  1.1456 +                init_top(m, m->top, m->topsize + tsize);
  1.1457              } else {
  1.1458                  if (tbase < m->least_addr)
  1.1459                      m->least_addr = tbase;
  1.1460 @@ -3601,26 +3598,26 @@
  1.1461                  while (sp != 0 && sp->base != tbase + tsize)
  1.1462                      sp = sp->next;
  1.1463                  if (sp != 0 &&
  1.1464 -                    !is_extern_segment (sp) &&
  1.1465 +                    !is_extern_segment(sp) &&
  1.1466                      (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
  1.1467                      char *oldbase = sp->base;
  1.1468                      sp->base = tbase;
  1.1469                      sp->size += tsize;
  1.1470 -                    return prepend_alloc (m, tbase, oldbase, nb);
  1.1471 +                    return prepend_alloc(m, tbase, oldbase, nb);
  1.1472                  } else
  1.1473 -                    add_segment (m, tbase, tsize, mmap_flag);
  1.1474 +                    add_segment(m, tbase, tsize, mmap_flag);
  1.1475              }
  1.1476          }
  1.1477  
  1.1478          if (nb < m->topsize) {  /* Allocate from new or extended top space */
  1.1479              size_t rsize = m->topsize -= nb;
  1.1480              mchunkptr p = m->top;
  1.1481 -            mchunkptr r = m->top = chunk_plus_offset (p, nb);
  1.1482 +            mchunkptr r = m->top = chunk_plus_offset(p, nb);
  1.1483              r->head = rsize | PINUSE_BIT;
  1.1484 -            set_size_and_pinuse_of_inuse_chunk (m, p, nb);
  1.1485 -            check_top_chunk (m, m->top);
  1.1486 -            check_malloced_chunk (m, chunk2mem (p), nb);
  1.1487 -            return chunk2mem (p);
  1.1488 +            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  1.1489 +            check_top_chunk(m, m->top);
  1.1490 +            check_malloced_chunk(m, chunk2mem(p), nb);
  1.1491 +            return chunk2mem(p);
  1.1492          }
  1.1493      }
  1.1494  
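Every branch of sys_alloc sizes its request with granularity_align; a minimal sketch of that rounding, assuming the unit is a power of two (the real macro is configuration dependent):

#include <stddef.h>

static size_t round_up_to_unit(size_t n, size_t unit)
{
    return (n + unit - 1) & ~(unit - 1);  /* unit must be a power of two */
}
/* e.g. round_up_to_unit(70000, 65536) == 131072 */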
  1.1495 @@ -3632,7 +3629,7 @@
  1.1496  
  1.1497  /* Unmap and unlink any mmapped segments that don't contain used chunks */
  1.1498  static size_t
  1.1499 -release_unused_segments (mstate m)
  1.1500 +release_unused_segments(mstate m)
  1.1501  {
  1.1502      size_t released = 0;
  1.1503      msegmentptr pred = &m->seg;
  1.1504 @@ -3641,28 +3638,28 @@
  1.1505          char *base = sp->base;
  1.1506          size_t size = sp->size;
  1.1507          msegmentptr next = sp->next;
  1.1508 -        if (is_mmapped_segment (sp) && !is_extern_segment (sp)) {
  1.1509 -            mchunkptr p = align_as_chunk (base);
  1.1510 -            size_t psize = chunksize (p);
  1.1511 +        if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
  1.1512 +            mchunkptr p = align_as_chunk(base);
  1.1513 +            size_t psize = chunksize(p);
  1.1514              /* Can unmap if first chunk holds entire segment and not pinned */
  1.1515 -            if (!cinuse (p)
  1.1516 +            if (!cinuse(p)
  1.1517                  && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
  1.1518                  tchunkptr tp = (tchunkptr) p;
  1.1519 -                assert (segment_holds (sp, (char *) sp));
  1.1520 +                assert(segment_holds(sp, (char *) sp));
  1.1521                  if (p == m->dv) {
  1.1522                      m->dv = 0;
  1.1523                      m->dvsize = 0;
  1.1524                  } else {
  1.1525 -                    unlink_large_chunk (m, tp);
  1.1526 +                    unlink_large_chunk(m, tp);
  1.1527                  }
  1.1528 -                if (CALL_MUNMAP (base, size) == 0) {
  1.1529 +                if (CALL_MUNMAP(base, size) == 0) {
  1.1530                      released += size;
  1.1531                      m->footprint -= size;
  1.1532                      /* unlink obsoleted record */
  1.1533                      sp = pred;
  1.1534                      sp->next = next;
  1.1535                  } else {        /* back out if cannot unmap */
  1.1536 -                    insert_large_chunk (m, tp, psize);
  1.1537 +                    insert_large_chunk(m, tp, psize);
  1.1538                  }
  1.1539              }
  1.1540          }
  1.1541 @@ -3673,10 +3670,10 @@
  1.1542  }
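The pred/sp bookkeeping above is the classic trailing-pointer deletion from a singly linked list; a minimal sketch with a hypothetical node type:

#include <stddef.h>

struct seg_node { struct seg_node *next; };

static void splice_out_next(struct seg_node *pred)
{
    if (pred->next != NULL)
        pred->next = pred->next->next;  /* unlink obsoleted record */
}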
  1.1543  
  1.1544  static int
  1.1545 -sys_trim (mstate m, size_t pad)
  1.1546 +sys_trim(mstate m, size_t pad)
  1.1547  {
  1.1548      size_t released = 0;
  1.1549 -    if (pad < MAX_REQUEST && is_initialized (m)) {
  1.1550 +    if (pad < MAX_REQUEST && is_initialized(m)) {
  1.1551          pad += TOP_FOOT_SIZE;   /* ensure enough room for segment overhead */
  1.1552  
  1.1553          if (m->topsize > pad) {
  1.1554 @@ -3685,16 +3682,16 @@
  1.1555              size_t extra =
  1.1556                  ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
  1.1557                   SIZE_T_ONE) * unit;
  1.1558 -            msegmentptr sp = segment_holding (m, (char *) m->top);
  1.1559 -
  1.1560 -            if (!is_extern_segment (sp)) {
  1.1561 -                if (is_mmapped_segment (sp)) {
  1.1562 -                    if (HAVE_MMAP && sp->size >= extra && !has_segment_link (m, sp)) {  /* can't shrink if pinned */
  1.1563 +            msegmentptr sp = segment_holding(m, (char *) m->top);
  1.1564 +
  1.1565 +            if (!is_extern_segment(sp)) {
  1.1566 +                if (is_mmapped_segment(sp)) {
  1.1567 +                    if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) {   /* can't shrink if pinned */
  1.1568                          size_t newsize = sp->size - extra;
  1.1569                          /* Prefer mremap, fall back to munmap */
  1.1570                          if ((CALL_MREMAP
  1.1571                               (sp->base, sp->size, newsize, 0) != MFAIL)
  1.1572 -                            || (CALL_MUNMAP (sp->base + newsize, extra)
  1.1573 +                            || (CALL_MUNMAP(sp->base + newsize, extra)
  1.1574                                  == 0)) {
  1.1575                              released = extra;
  1.1576                          }
  1.1577 @@ -3702,32 +3699,32 @@
  1.1578                  } else if (HAVE_MORECORE) {
  1.1579                      if (extra >= HALF_MAX_SIZE_T)       /* Avoid wrapping negative */
  1.1580                          extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
  1.1581 -                    ACQUIRE_MORECORE_LOCK ();
  1.1582 +                    ACQUIRE_MORECORE_LOCK();
  1.1583                      {
  1.1584                          /* Make sure end of memory is where we last set it. */
  1.1585 -                        char *old_br = (char *) (CALL_MORECORE (0));
  1.1586 +                        char *old_br = (char *) (CALL_MORECORE(0));
  1.1587                          if (old_br == sp->base + sp->size) {
  1.1588 -                            char *rel_br = (char *) (CALL_MORECORE (-extra));
  1.1589 -                            char *new_br = (char *) (CALL_MORECORE (0));
  1.1590 +                            char *rel_br = (char *) (CALL_MORECORE(-extra));
  1.1591 +                            char *new_br = (char *) (CALL_MORECORE(0));
  1.1592                              if (rel_br != CMFAIL && new_br < old_br)
  1.1593                                  released = old_br - new_br;
  1.1594                          }
  1.1595                      }
  1.1596 -                    RELEASE_MORECORE_LOCK ();
  1.1597 +                    RELEASE_MORECORE_LOCK();
  1.1598                  }
  1.1599              }
  1.1600  
  1.1601              if (released != 0) {
  1.1602                  sp->size -= released;
  1.1603                  m->footprint -= released;
  1.1604 -                init_top (m, m->top, m->topsize - released);
  1.1605 -                check_top_chunk (m, m->top);
  1.1606 +                init_top(m, m->top, m->topsize - released);
  1.1607 +                check_top_chunk(m, m->top);
  1.1608              }
  1.1609          }
  1.1610  
  1.1611          /* Unmap any unused mmapped segments */
  1.1612          if (HAVE_MMAP)
  1.1613 -            released += release_unused_segments (m);
  1.1614 +            released += release_unused_segments(m);
  1.1615  
  1.1616          /* On failure, disable autotrim to avoid repeated failed future calls */
  1.1617          if (released == 0)
  1.1618 @@ -3741,21 +3738,21 @@
  1.1619  
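The extra computation in sys_trim above releases one whole granularity unit fewer than would cover the entire surplus over pad, so the top chunk always retains some slack; a minimal sketch with the same arithmetic (hypothetical helper name):

#include <stddef.h>

static size_t trim_extra(size_t topsize, size_t pad, size_t unit)
{
    if (topsize <= pad)
        return 0;
    return ((topsize - pad + (unit - 1)) / unit - 1) * unit;
}
/* e.g. trim_extra(200 * 1024, 0, 64 * 1024) == 192 * 1024 */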
  1.1620  /* allocate a large request from the best fitting chunk in a treebin */
  1.1621  static void *
  1.1622 -tmalloc_large (mstate m, size_t nb)
  1.1623 +tmalloc_large(mstate m, size_t nb)
  1.1624  {
  1.1625      tchunkptr v = 0;
  1.1626      size_t rsize = -nb;         /* Unsigned negation */
  1.1627      tchunkptr t;
  1.1628      bindex_t idx;
  1.1629 -    compute_tree_index (nb, idx);
  1.1630 -
  1.1631 -    if ((t = *treebin_at (m, idx)) != 0) {
  1.1632 +    compute_tree_index(nb, idx);
  1.1633 +
  1.1634 +    if ((t = *treebin_at(m, idx)) != 0) {
  1.1635          /* Traverse tree for this bin looking for node with size == nb */
  1.1636 -        size_t sizebits = nb << leftshift_for_tree_index (idx);
  1.1637 +        size_t sizebits = nb << leftshift_for_tree_index(idx);
  1.1638          tchunkptr rst = 0;      /* The deepest untaken right subtree */
  1.1639          for (;;) {
  1.1640              tchunkptr rt;
  1.1641 -            size_t trem = chunksize (t) - nb;
  1.1642 +            size_t trem = chunksize(t) - nb;
  1.1643              if (trem < rsize) {
  1.1644                  v = t;
  1.1645                  if ((rsize = trem) == 0)
  1.1646 @@ -3774,150 +3771,150 @@
  1.1647      }
  1.1648  
  1.1649      if (t == 0 && v == 0) {     /* set t to root of next non-empty treebin */
  1.1650 -        binmap_t leftbits = left_bits (idx2bit (idx)) & m->treemap;
  1.1651 +        binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
  1.1652          if (leftbits != 0) {
  1.1653              bindex_t i;
  1.1654 -            binmap_t leastbit = least_bit (leftbits);
  1.1655 -            compute_bit2idx (leastbit, i);
  1.1656 -            t = *treebin_at (m, i);
  1.1657 +            binmap_t leastbit = least_bit(leftbits);
  1.1658 +            compute_bit2idx(leastbit, i);
  1.1659 +            t = *treebin_at(m, i);
  1.1660          }
  1.1661      }
  1.1662  
  1.1663      while (t != 0) {            /* find smallest of tree or subtree */
  1.1664 -        size_t trem = chunksize (t) - nb;
  1.1665 +        size_t trem = chunksize(t) - nb;
  1.1666          if (trem < rsize) {
  1.1667              rsize = trem;
  1.1668              v = t;
  1.1669          }
  1.1670 -        t = leftmost_child (t);
  1.1671 +        t = leftmost_child(t);
  1.1672      }
  1.1673  
  1.1674      /*  If dv is a better fit, return 0 so malloc will use it */
  1.1675      if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
  1.1676 -        if (RTCHECK (ok_address (m, v))) {      /* split */
  1.1677 -            mchunkptr r = chunk_plus_offset (v, nb);
  1.1678 -            assert (chunksize (v) == rsize + nb);
  1.1679 -            if (RTCHECK (ok_next (v, r))) {
  1.1680 -                unlink_large_chunk (m, v);
  1.1681 +        if (RTCHECK(ok_address(m, v))) {        /* split */
  1.1682 +            mchunkptr r = chunk_plus_offset(v, nb);
  1.1683 +            assert(chunksize(v) == rsize + nb);
  1.1684 +            if (RTCHECK(ok_next(v, r))) {
  1.1685 +                unlink_large_chunk(m, v);
  1.1686                  if (rsize < MIN_CHUNK_SIZE)
  1.1687 -                    set_inuse_and_pinuse (m, v, (rsize + nb));
  1.1688 +                    set_inuse_and_pinuse(m, v, (rsize + nb));
  1.1689                  else {
  1.1690 -                    set_size_and_pinuse_of_inuse_chunk (m, v, nb);
  1.1691 -                    set_size_and_pinuse_of_free_chunk (r, rsize);
  1.1692 -                    insert_chunk (m, r, rsize);
  1.1693 +                    set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  1.1694 +                    set_size_and_pinuse_of_free_chunk(r, rsize);
  1.1695 +                    insert_chunk(m, r, rsize);
  1.1696                  }
  1.1697 -                return chunk2mem (v);
  1.1698 +                return chunk2mem(v);
  1.1699              }
  1.1700          }
  1.1701 -        CORRUPTION_ERROR_ACTION (m);
  1.1702 +        CORRUPTION_ERROR_ACTION(m);
  1.1703      }
  1.1704      return 0;
  1.1705  }
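The size bookkeeping in tmalloc_large relies on unsigned wraparound: candidates smaller than the request wrap to huge remainders and are never selected. A standalone sketch of that best-fit rule over a plain array:

#include <stddef.h>

static size_t best_fit(const size_t *sizes, size_t n, size_t nb)
{
    size_t best = (size_t) -1;        /* "no fit yet" */
    size_t rsize = (size_t) 0 - nb;   /* unsigned negation, as above */
    size_t i;
    for (i = 0; i < n; ++i) {
        size_t trem = sizes[i] - nb;  /* wraps huge if sizes[i] < nb */
        if (trem < rsize) {
            rsize = trem;
            best = i;
            if (rsize == 0)
                break;                /* exact fit; stop early */
        }
    }
    return best;                      /* index of best fit, or (size_t) -1 */
}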
  1.1706  
  1.1707  /* allocate a small request from the best fitting chunk in a treebin */
  1.1708  static void *
  1.1709 -tmalloc_small (mstate m, size_t nb)
  1.1710 +tmalloc_small(mstate m, size_t nb)
  1.1711  {
  1.1712      tchunkptr t, v;
  1.1713      size_t rsize;
  1.1714      bindex_t i;
  1.1715 -    binmap_t leastbit = least_bit (m->treemap);
  1.1716 -    compute_bit2idx (leastbit, i);
  1.1717 -
  1.1718 -    v = t = *treebin_at (m, i);
  1.1719 -    rsize = chunksize (t) - nb;
  1.1720 -
  1.1721 -    while ((t = leftmost_child (t)) != 0) {
  1.1722 -        size_t trem = chunksize (t) - nb;
  1.1723 +    binmap_t leastbit = least_bit(m->treemap);
  1.1724 +    compute_bit2idx(leastbit, i);
  1.1725 +
  1.1726 +    v = t = *treebin_at(m, i);
  1.1727 +    rsize = chunksize(t) - nb;
  1.1728 +
  1.1729 +    while ((t = leftmost_child(t)) != 0) {
  1.1730 +        size_t trem = chunksize(t) - nb;
  1.1731          if (trem < rsize) {
  1.1732              rsize = trem;
  1.1733              v = t;
  1.1734          }
  1.1735      }
  1.1736  
  1.1737 -    if (RTCHECK (ok_address (m, v))) {
  1.1738 -        mchunkptr r = chunk_plus_offset (v, nb);
  1.1739 -        assert (chunksize (v) == rsize + nb);
  1.1740 -        if (RTCHECK (ok_next (v, r))) {
  1.1741 -            unlink_large_chunk (m, v);
  1.1742 +    if (RTCHECK(ok_address(m, v))) {
  1.1743 +        mchunkptr r = chunk_plus_offset(v, nb);
  1.1744 +        assert(chunksize(v) == rsize + nb);
  1.1745 +        if (RTCHECK(ok_next(v, r))) {
  1.1746 +            unlink_large_chunk(m, v);
  1.1747              if (rsize < MIN_CHUNK_SIZE)
  1.1748 -                set_inuse_and_pinuse (m, v, (rsize + nb));
  1.1749 +                set_inuse_and_pinuse(m, v, (rsize + nb));
  1.1750              else {
  1.1751 -                set_size_and_pinuse_of_inuse_chunk (m, v, nb);
  1.1752 -                set_size_and_pinuse_of_free_chunk (r, rsize);
  1.1753 -                replace_dv (m, r, rsize);
  1.1754 +                set_size_and_pinuse_of_inuse_chunk(m, v, nb);
  1.1755 +                set_size_and_pinuse_of_free_chunk(r, rsize);
  1.1756 +                replace_dv(m, r, rsize);
  1.1757              }
  1.1758 -            return chunk2mem (v);
  1.1759 +            return chunk2mem(v);
  1.1760          }
  1.1761      }
  1.1762  
  1.1763 -    CORRUPTION_ERROR_ACTION (m);
  1.1764 +    CORRUPTION_ERROR_ACTION(m);
  1.1765      return 0;
  1.1766  }
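Both tree searches lean on two bitmap tricks: least_bit isolates the lowest set bit of the treemap (the smallest non-empty bin that can satisfy the request), and compute_bit2idx turns it back into an index. A sketch of the first:

#include <stdint.h>

static uint32_t lowest_set_bit(uint32_t x)
{
    return x & (uint32_t) (0u - x);   /* two's-complement trick; 0 if x == 0 */
}
/* e.g. lowest_set_bit(0x28) == 0x08 */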
  1.1767  
  1.1768  /* --------------------------- realloc support --------------------------- */
  1.1769  
  1.1770  static void *
  1.1771 -internal_realloc (mstate m, void *oldmem, size_t bytes)
  1.1772 +internal_realloc(mstate m, void *oldmem, size_t bytes)
  1.1773  {
  1.1774      if (bytes >= MAX_REQUEST) {
  1.1775          MALLOC_FAILURE_ACTION;
  1.1776          return 0;
  1.1777      }
  1.1778 -    if (!PREACTION (m)) {
  1.1779 -        mchunkptr oldp = mem2chunk (oldmem);
  1.1780 -        size_t oldsize = chunksize (oldp);
  1.1781 -        mchunkptr next = chunk_plus_offset (oldp, oldsize);
  1.1782 +    if (!PREACTION(m)) {
  1.1783 +        mchunkptr oldp = mem2chunk(oldmem);
  1.1784 +        size_t oldsize = chunksize(oldp);
  1.1785 +        mchunkptr next = chunk_plus_offset(oldp, oldsize);
  1.1786          mchunkptr newp = 0;
  1.1787          void *extra = 0;
  1.1788  
  1.1789          /* Try to either shrink or extend into top. Else malloc-copy-free */
  1.1790  
  1.1791 -        if (RTCHECK (ok_address (m, oldp) && ok_cinuse (oldp) &&
  1.1792 -                     ok_next (oldp, next) && ok_pinuse (next))) {
  1.1793 -            size_t nb = request2size (bytes);
  1.1794 -            if (is_mmapped (oldp))
  1.1795 -                newp = mmap_resize (m, oldp, nb);
  1.1796 +        if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
  1.1797 +                    ok_next(oldp, next) && ok_pinuse(next))) {
  1.1798 +            size_t nb = request2size(bytes);
  1.1799 +            if (is_mmapped(oldp))
  1.1800 +                newp = mmap_resize(m, oldp, nb);
  1.1801              else if (oldsize >= nb) {   /* already big enough */
  1.1802                  size_t rsize = oldsize - nb;
  1.1803                  newp = oldp;
  1.1804                  if (rsize >= MIN_CHUNK_SIZE) {
  1.1805 -                    mchunkptr remainder = chunk_plus_offset (newp, nb);
  1.1806 -                    set_inuse (m, newp, nb);
  1.1807 -                    set_inuse (m, remainder, rsize);
  1.1808 -                    extra = chunk2mem (remainder);
  1.1809 +                    mchunkptr remainder = chunk_plus_offset(newp, nb);
  1.1810 +                    set_inuse(m, newp, nb);
  1.1811 +                    set_inuse(m, remainder, rsize);
  1.1812 +                    extra = chunk2mem(remainder);
  1.1813                  }
  1.1814              } else if (next == m->top && oldsize + m->topsize > nb) {
  1.1815                  /* Expand into top */
  1.1816                  size_t newsize = oldsize + m->topsize;
  1.1817                  size_t newtopsize = newsize - nb;
  1.1818 -                mchunkptr newtop = chunk_plus_offset (oldp, nb);
  1.1819 -                set_inuse (m, oldp, nb);
  1.1820 +                mchunkptr newtop = chunk_plus_offset(oldp, nb);
  1.1821 +                set_inuse(m, oldp, nb);
  1.1822                  newtop->head = newtopsize | PINUSE_BIT;
  1.1823                  m->top = newtop;
  1.1824                  m->topsize = newtopsize;
  1.1825                  newp = oldp;
  1.1826              }
  1.1827          } else {
  1.1828 -            USAGE_ERROR_ACTION (m, oldmem);
  1.1829 -            POSTACTION (m);
  1.1830 +            USAGE_ERROR_ACTION(m, oldmem);
  1.1831 +            POSTACTION(m);
  1.1832              return 0;
  1.1833          }
  1.1834  
  1.1835 -        POSTACTION (m);
  1.1836 +        POSTACTION(m);
  1.1837  
  1.1838          if (newp != 0) {
  1.1839              if (extra != 0) {
  1.1840 -                internal_free (m, extra);
  1.1841 +                internal_free(m, extra);
  1.1842              }
  1.1843 -            check_inuse_chunk (m, newp);
  1.1844 -            return chunk2mem (newp);
  1.1845 +            check_inuse_chunk(m, newp);
  1.1846 +            return chunk2mem(newp);
  1.1847          } else {
  1.1848 -            void *newmem = internal_malloc (m, bytes);
  1.1849 +            void *newmem = internal_malloc(m, bytes);
  1.1850              if (newmem != 0) {
  1.1851 -                size_t oc = oldsize - overhead_for (oldp);
  1.1852 -                memcpy (newmem, oldmem, (oc < bytes) ? oc : bytes);
  1.1853 -                internal_free (m, oldmem);
  1.1854 +                size_t oc = oldsize - overhead_for(oldp);
  1.1855 +                memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
  1.1856 +                internal_free(m, oldmem);
  1.1857              }
  1.1858              return newmem;
  1.1859          }
  1.1860 @@ -3928,10 +3925,10 @@
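When neither shrinking in place nor growing into top applies, internal_realloc above falls back to malloc-copy-free. That last branch in standalone standard C:

#include <stdlib.h>
#include <string.h>

static void *realloc_by_copy(void *old, size_t oldsize, size_t bytes)
{
    void *p = malloc(bytes);
    if (p != NULL) {
        memcpy(p, old, oldsize < bytes ? oldsize : bytes);  /* copy the smaller */
        free(old);
    }
    return p;
}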
  1.1861  /* --------------------------- memalign support -------------------------- */
  1.1862  
  1.1863  static void *
  1.1864 -internal_memalign (mstate m, size_t alignment, size_t bytes)
  1.1865 +internal_memalign(mstate m, size_t alignment, size_t bytes)
  1.1866  {
  1.1867      if (alignment <= MALLOC_ALIGNMENT)  /* Can just use malloc */
  1.1868 -        return internal_malloc (m, bytes);
  1.1869 +        return internal_malloc(m, bytes);
  1.1870      if (alignment < MIN_CHUNK_SIZE)     /* must be at least a minimum chunk size */
  1.1871          alignment = MIN_CHUNK_SIZE;
  1.1872      if ((alignment & (alignment - SIZE_T_ONE)) != 0) {  /* Ensure a power of 2 */
  1.1873 @@ -3946,15 +3943,15 @@
  1.1874              MALLOC_FAILURE_ACTION;
  1.1875          }
  1.1876      } else {
  1.1877 -        size_t nb = request2size (bytes);
  1.1878 +        size_t nb = request2size(bytes);
  1.1879          size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
  1.1880 -        char *mem = (char *) internal_malloc (m, req);
  1.1881 +        char *mem = (char *) internal_malloc(m, req);
  1.1882          if (mem != 0) {
  1.1883              void *leader = 0;
  1.1884              void *trailer = 0;
  1.1885 -            mchunkptr p = mem2chunk (mem);
  1.1886 -
  1.1887 -            if (PREACTION (m))
  1.1888 +            mchunkptr p = mem2chunk(mem);
  1.1889 +
  1.1890 +            if (PREACTION(m))
  1.1891                  return 0;
  1.1892              if ((((size_t) (mem)) % alignment) != 0) {  /* misaligned */
  1.1893                  /*
  1.1894 @@ -3966,51 +3963,51 @@
  1.1895                     possible.
  1.1896                   */
  1.1897                  char *br = (char *)
  1.1898 -                    mem2chunk ((size_t)
  1.1899 -                               (((size_t)
  1.1900 -                                 (mem + alignment -
  1.1901 -                                  SIZE_T_ONE)) & -alignment));
  1.1902 +                    mem2chunk((size_t)
  1.1903 +                              (((size_t)
  1.1904 +                                (mem + alignment -
  1.1905 +                                 SIZE_T_ONE)) & -alignment));
  1.1906                  char *pos =
  1.1907                      ((size_t) (br - (char *) (p)) >=
  1.1908                       MIN_CHUNK_SIZE) ? br : br + alignment;
  1.1909                  mchunkptr newp = (mchunkptr) pos;
  1.1910                  size_t leadsize = pos - (char *) (p);
  1.1911 -                size_t newsize = chunksize (p) - leadsize;
  1.1912 -
  1.1913 -                if (is_mmapped (p)) {   /* For mmapped chunks, just adjust offset */
  1.1914 +                size_t newsize = chunksize(p) - leadsize;
  1.1915 +
  1.1916 +                if (is_mmapped(p)) {    /* For mmapped chunks, just adjust offset */
  1.1917                      newp->prev_foot = p->prev_foot + leadsize;
  1.1918                      newp->head = (newsize | CINUSE_BIT);
  1.1919                  } else {        /* Otherwise, give back leader, use the rest */
  1.1920 -                    set_inuse (m, newp, newsize);
  1.1921 -                    set_inuse (m, p, leadsize);
  1.1922 -                    leader = chunk2mem (p);
  1.1923 +                    set_inuse(m, newp, newsize);
  1.1924 +                    set_inuse(m, p, leadsize);
  1.1925 +                    leader = chunk2mem(p);
  1.1926                  }
  1.1927                  p = newp;
  1.1928              }
  1.1929  
  1.1930              /* Give back spare room at the end */
  1.1931 -            if (!is_mmapped (p)) {
  1.1932 -                size_t size = chunksize (p);
  1.1933 +            if (!is_mmapped(p)) {
  1.1934 +                size_t size = chunksize(p);
  1.1935                  if (size > nb + MIN_CHUNK_SIZE) {
  1.1936                      size_t remainder_size = size - nb;
  1.1937 -                    mchunkptr remainder = chunk_plus_offset (p, nb);
  1.1938 -                    set_inuse (m, p, nb);
  1.1939 -                    set_inuse (m, remainder, remainder_size);
  1.1940 -                    trailer = chunk2mem (remainder);
  1.1941 +                    mchunkptr remainder = chunk_plus_offset(p, nb);
  1.1942 +                    set_inuse(m, p, nb);
  1.1943 +                    set_inuse(m, remainder, remainder_size);
  1.1944 +                    trailer = chunk2mem(remainder);
  1.1945                  }
  1.1946              }
  1.1947  
  1.1948 -            assert (chunksize (p) >= nb);
  1.1949 -            assert ((((size_t) (chunk2mem (p))) % alignment) == 0);
  1.1950 -            check_inuse_chunk (m, p);
  1.1951 -            POSTACTION (m);
  1.1952 +            assert(chunksize(p) >= nb);
  1.1953 +            assert((((size_t) (chunk2mem(p))) % alignment) == 0);
  1.1954 +            check_inuse_chunk(m, p);
  1.1955 +            POSTACTION(m);
  1.1956              if (leader != 0) {
  1.1957 -                internal_free (m, leader);
  1.1958 +                internal_free(m, leader);
  1.1959              }
  1.1960              if (trailer != 0) {
  1.1961 -                internal_free (m, trailer);
  1.1962 +                internal_free(m, trailer);
  1.1963              }
  1.1964 -            return chunk2mem (p);
  1.1965 +            return chunk2mem(p);
  1.1966          }
  1.1967      }
  1.1968      return 0;
  1.1969 @@ -4019,7 +4016,7 @@
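internal_memalign above over-allocates by roughly the alignment, steps forward to the first suitably aligned chunk boundary, and hands the leader and trailer back to the allocator. A standalone sketch of the same strategy over plain malloc, which must instead keep the raw pointer for free:

#include <stdint.h>
#include <stdlib.h>

static void *aligned_by_overalloc(size_t alignment, size_t bytes, void **raw_out)
{
    void *raw = malloc(bytes + alignment - 1);  /* alignment: power of two */
    if (raw == NULL)
        return NULL;
    *raw_out = raw;                  /* pass this, not the result, to free() */
    return (void *) (((uintptr_t) raw + alignment - 1)
                     & ~((uintptr_t) alignment - 1));
}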
  1.1970  /* ------------------------ comalloc/coalloc support --------------------- */
  1.1971  
  1.1972  static void **
  1.1973 -ialloc (mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
  1.1974 +ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
  1.1975  {
  1.1976      /*
  1.1977         This provides common support for independent_X routines, handling
  1.1978 @@ -4051,20 +4048,20 @@
  1.1979      } else {
  1.1980          /* if empty req, must still return chunk representing empty array */
  1.1981          if (n_elements == 0)
  1.1982 -            return (void **) internal_malloc (m, 0);
  1.1983 +            return (void **) internal_malloc(m, 0);
  1.1984          marray = 0;
  1.1985 -        array_size = request2size (n_elements * (sizeof (void *)));
  1.1986 +        array_size = request2size(n_elements * (sizeof(void *)));
  1.1987      }
  1.1988  
  1.1989      /* compute total element size */
  1.1990      if (opts & 0x1) {           /* all-same-size */
  1.1991 -        element_size = request2size (*sizes);
  1.1992 +        element_size = request2size(*sizes);
  1.1993          contents_size = n_elements * element_size;
  1.1994      } else {                    /* add up all the sizes */
  1.1995          element_size = 0;
  1.1996          contents_size = 0;
  1.1997          for (i = 0; i != n_elements; ++i)
  1.1998 -            contents_size += request2size (sizes[i]);
  1.1999 +            contents_size += request2size(sizes[i]);
  1.2000      }
  1.2001  
  1.2002      size = contents_size + array_size;
  1.2003 @@ -4074,48 +4071,48 @@
  1.2004         malloc won't use it, since we would not be able to later
  1.2005         free/realloc space internal to a segregated mmap region.
  1.2006       */
  1.2007 -    was_enabled = use_mmap (m);
  1.2008 -    disable_mmap (m);
  1.2009 -    mem = internal_malloc (m, size - CHUNK_OVERHEAD);
  1.2010 +    was_enabled = use_mmap(m);
  1.2011 +    disable_mmap(m);
  1.2012 +    mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  1.2013      if (was_enabled)
  1.2014 -        enable_mmap (m);
  1.2015 +        enable_mmap(m);
  1.2016      if (mem == 0)
  1.2017          return 0;
  1.2018  
  1.2019 -    if (PREACTION (m))
  1.2020 +    if (PREACTION(m))
  1.2021          return 0;
  1.2022 -    p = mem2chunk (mem);
  1.2023 -    remainder_size = chunksize (p);
  1.2024 -
  1.2025 -    assert (!is_mmapped (p));
  1.2026 +    p = mem2chunk(mem);
  1.2027 +    remainder_size = chunksize(p);
  1.2028 +
  1.2029 +    assert(!is_mmapped(p));
  1.2030  
  1.2031      if (opts & 0x2) {           /* optionally clear the elements */
  1.2032 -        memset ((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  1.2033 +        memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  1.2034      }
  1.2035  
  1.2036      /* If not provided, allocate the pointer array as final part of chunk */
  1.2037      if (marray == 0) {
  1.2038          size_t array_chunk_size;
  1.2039 -        array_chunk = chunk_plus_offset (p, contents_size);
  1.2040 +        array_chunk = chunk_plus_offset(p, contents_size);
  1.2041          array_chunk_size = remainder_size - contents_size;
  1.2042 -        marray = (void **) (chunk2mem (array_chunk));
  1.2043 -        set_size_and_pinuse_of_inuse_chunk (m, array_chunk, array_chunk_size);
  1.2044 +        marray = (void **) (chunk2mem(array_chunk));
  1.2045 +        set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
  1.2046          remainder_size = contents_size;
  1.2047      }
  1.2048  
  1.2049      /* split out elements */
  1.2050      for (i = 0;; ++i) {
  1.2051 -        marray[i] = chunk2mem (p);
  1.2052 +        marray[i] = chunk2mem(p);
  1.2053          if (i != n_elements - 1) {
  1.2054              if (element_size != 0)
  1.2055                  size = element_size;
  1.2056              else
  1.2057 -                size = request2size (sizes[i]);
  1.2058 +                size = request2size(sizes[i]);
  1.2059              remainder_size -= size;
  1.2060 -            set_size_and_pinuse_of_inuse_chunk (m, p, size);
  1.2061 -            p = chunk_plus_offset (p, size);
  1.2062 +            set_size_and_pinuse_of_inuse_chunk(m, p, size);
  1.2063 +            p = chunk_plus_offset(p, size);
  1.2064          } else {                /* the final element absorbs any overallocation slop */
  1.2065 -            set_size_and_pinuse_of_inuse_chunk (m, p, remainder_size);
  1.2066 +            set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
  1.2067              break;
  1.2068          }
  1.2069      }
  1.2070 @@ -4124,18 +4121,18 @@
  1.2071      if (marray != chunks) {
  1.2072          /* final element must have exactly exhausted chunk */
  1.2073          if (element_size != 0) {
  1.2074 -            assert (remainder_size == element_size);
  1.2075 +            assert(remainder_size == element_size);
  1.2076          } else {
  1.2077 -            assert (remainder_size == request2size (sizes[i]));
  1.2078 +            assert(remainder_size == request2size(sizes[i]));
  1.2079          }
  1.2080 -        check_inuse_chunk (m, mem2chunk (marray));
  1.2081 +        check_inuse_chunk(m, mem2chunk(marray));
  1.2082      }
  1.2083      for (i = 0; i != n_elements; ++i)
  1.2084 -        check_inuse_chunk (m, mem2chunk (marray[i]));
  1.2085 +        check_inuse_chunk(m, mem2chunk(marray[i]));
  1.2086  
  1.2087  #endif /* DEBUG */
  1.2088  
  1.2089 -    POSTACTION (m);
  1.2090 +    POSTACTION(m);
  1.2091      return marray;
  1.2092  }
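A usage sketch for ialloc through its public wrapper (assuming the dl-prefixed entry points below are compiled in): one contiguous allocation carved into three independently freeable pieces.

static int comalloc_demo(void)
{
    size_t sizes[3] = { 16, 64, 256 };
    void *parts[3];
    if (dlindependent_comalloc(3, sizes, parts) == 0)
        return -1;                  /* allocation failed */
    /* ... use parts[0..2]; each piece may later be passed to dlfree ... */
    dlfree(parts[0]);
    dlfree(parts[1]);
    dlfree(parts[2]);
    return 0;
}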
  1.2093  
  1.2094 @@ -4145,7 +4142,7 @@
  1.2095  #if !ONLY_MSPACES
  1.2096  
  1.2097  void *
  1.2098 -dlmalloc (size_t bytes)
  1.2099 +dlmalloc(size_t bytes)
  1.2100  {
  1.2101      /*
  1.2102         Basic algorithm:
  1.2103 @@ -4170,26 +4167,26 @@
  1.2104        The ugly gotos here ensure that postaction occurs along all paths.
  1.2105       */
  1.2106  
  1.2107 -    if (!PREACTION (gm)) {
  1.2108 +    if (!PREACTION(gm)) {
  1.2109          void *mem;
  1.2110          size_t nb;
  1.2111          if (bytes <= MAX_SMALL_REQUEST) {
  1.2112              bindex_t idx;
  1.2113              binmap_t smallbits;
  1.2114 -            nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request (bytes);
  1.2115 -            idx = small_index (nb);
  1.2116 +            nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
  1.2117 +            idx = small_index(nb);
  1.2118              smallbits = gm->smallmap >> idx;
  1.2119  
  1.2120              if ((smallbits & 0x3U) != 0) {      /* Remainderless fit to a smallbin. */
  1.2121                  mchunkptr b, p;
  1.2122                  idx += ~smallbits & 1;  /* Uses next bin if idx empty */
  1.2123 -                b = smallbin_at (gm, idx);
  1.2124 +                b = smallbin_at(gm, idx);
  1.2125                  p = b->fd;
  1.2126 -                assert (chunksize (p) == small_index2size (idx));
  1.2127 -                unlink_first_small_chunk (gm, b, p, idx);
  1.2128 -                set_inuse_and_pinuse (gm, p, small_index2size (idx));
  1.2129 -                mem = chunk2mem (p);
  1.2130 -                check_malloced_chunk (gm, mem, nb);
  1.2131 +                assert(chunksize(p) == small_index2size(idx));
  1.2132 +                unlink_first_small_chunk(gm, b, p, idx);
  1.2133 +                set_inuse_and_pinuse(gm, p, small_index2size(idx));
  1.2134 +                mem = chunk2mem(p);
  1.2135 +                check_malloced_chunk(gm, mem, nb);
  1.2136                  goto postaction;
  1.2137              }
  1.2138  
  1.2139 @@ -4199,40 +4196,40 @@
  1.2140                      size_t rsize;
  1.2141                      bindex_t i;
  1.2142                      binmap_t leftbits =
  1.2143 -                        (smallbits << idx) & left_bits (idx2bit (idx));
  1.2144 -                    binmap_t leastbit = least_bit (leftbits);
  1.2145 -                    compute_bit2idx (leastbit, i);
  1.2146 -                    b = smallbin_at (gm, i);
  1.2147 +                        (smallbits << idx) & left_bits(idx2bit(idx));
  1.2148 +                    binmap_t leastbit = least_bit(leftbits);
  1.2149 +                    compute_bit2idx(leastbit, i);
  1.2150 +                    b = smallbin_at(gm, i);
  1.2151                      p = b->fd;
  1.2152 -                    assert (chunksize (p) == small_index2size (i));
  1.2153 -                    unlink_first_small_chunk (gm, b, p, i);
  1.2154 -                    rsize = small_index2size (i) - nb;
  1.2155 +                    assert(chunksize(p) == small_index2size(i));
  1.2156 +                    unlink_first_small_chunk(gm, b, p, i);
  1.2157 +                    rsize = small_index2size(i) - nb;
  1.2158                     /* Fit here cannot be remainderless if 4-byte sizes */
  1.2159                      if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  1.2160 -                        set_inuse_and_pinuse (gm, p, small_index2size (i));
  1.2161 +                        set_inuse_and_pinuse(gm, p, small_index2size(i));
  1.2162                      else {
  1.2163 -                        set_size_and_pinuse_of_inuse_chunk (gm, p, nb);
  1.2164 -                        r = chunk_plus_offset (p, nb);
  1.2165 -                        set_size_and_pinuse_of_free_chunk (r, rsize);
  1.2166 -                        replace_dv (gm, r, rsize);
  1.2167 +                        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1.2168 +                        r = chunk_plus_offset(p, nb);
  1.2169 +                        set_size_and_pinuse_of_free_chunk(r, rsize);
  1.2170 +                        replace_dv(gm, r, rsize);
  1.2171                      }
  1.2172 -                    mem = chunk2mem (p);
  1.2173 -                    check_malloced_chunk (gm, mem, nb);
  1.2174 +                    mem = chunk2mem(p);
  1.2175 +                    check_malloced_chunk(gm, mem, nb);
  1.2176                      goto postaction;
  1.2177                  }
  1.2178  
  1.2179                  else if (gm->treemap != 0
  1.2180 -                         && (mem = tmalloc_small (gm, nb)) != 0) {
  1.2181 -                    check_malloced_chunk (gm, mem, nb);
  1.2182 +                         && (mem = tmalloc_small(gm, nb)) != 0) {
  1.2183 +                    check_malloced_chunk(gm, mem, nb);
  1.2184                      goto postaction;
  1.2185                  }
  1.2186              }
  1.2187          } else if (bytes >= MAX_REQUEST)
  1.2188              nb = MAX_SIZE_T;    /* Too big to allocate. Force failure (in sys alloc) */
  1.2189          else {
  1.2190 -            nb = pad_request (bytes);
  1.2191 -            if (gm->treemap != 0 && (mem = tmalloc_large (gm, nb)) != 0) {
  1.2192 -                check_malloced_chunk (gm, mem, nb);
  1.2193 +            nb = pad_request(bytes);
  1.2194 +            if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
  1.2195 +                check_malloced_chunk(gm, mem, nb);
  1.2196                  goto postaction;
  1.2197              }
  1.2198          }
  1.2199 @@ -4241,37 +4238,37 @@
  1.2200              size_t rsize = gm->dvsize - nb;
  1.2201              mchunkptr p = gm->dv;
  1.2202              if (rsize >= MIN_CHUNK_SIZE) {      /* split dv */
  1.2203 -                mchunkptr r = gm->dv = chunk_plus_offset (p, nb);
  1.2204 +                mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
  1.2205                  gm->dvsize = rsize;
  1.2206 -                set_size_and_pinuse_of_free_chunk (r, rsize);
  1.2207 -                set_size_and_pinuse_of_inuse_chunk (gm, p, nb);
  1.2208 +                set_size_and_pinuse_of_free_chunk(r, rsize);
  1.2209 +                set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1.2210              } else {            /* exhaust dv */
  1.2211                  size_t dvs = gm->dvsize;
  1.2212                  gm->dvsize = 0;
  1.2213                  gm->dv = 0;
  1.2214 -                set_inuse_and_pinuse (gm, p, dvs);
  1.2215 +                set_inuse_and_pinuse(gm, p, dvs);
  1.2216              }
  1.2217 -            mem = chunk2mem (p);
  1.2218 -            check_malloced_chunk (gm, mem, nb);
  1.2219 +            mem = chunk2mem(p);
  1.2220 +            check_malloced_chunk(gm, mem, nb);
  1.2221              goto postaction;
  1.2222          }
  1.2223  
  1.2224          else if (nb < gm->topsize) {    /* Split top */
  1.2225              size_t rsize = gm->topsize -= nb;
  1.2226              mchunkptr p = gm->top;
  1.2227 -            mchunkptr r = gm->top = chunk_plus_offset (p, nb);
  1.2228 +            mchunkptr r = gm->top = chunk_plus_offset(p, nb);
  1.2229              r->head = rsize | PINUSE_BIT;
  1.2230 -            set_size_and_pinuse_of_inuse_chunk (gm, p, nb);
  1.2231 -            mem = chunk2mem (p);
  1.2232 -            check_top_chunk (gm, gm->top);
  1.2233 -            check_malloced_chunk (gm, mem, nb);
  1.2234 +            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  1.2235 +            mem = chunk2mem(p);
  1.2236 +            check_top_chunk(gm, gm->top);
  1.2237 +            check_malloced_chunk(gm, mem, nb);
  1.2238              goto postaction;
  1.2239          }
  1.2240  
  1.2241 -        mem = sys_alloc (gm, nb);
  1.2242 +        mem = sys_alloc(gm, nb);
  1.2243  
  1.2244        postaction:
  1.2245 -        POSTACTION (gm);
  1.2246 +        POSTACTION(gm);
  1.2247          return mem;
  1.2248      }
  1.2249  
  1.2250 @@ -4279,7 +4276,7 @@
  1.2251  }
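The smallbin fast path above maps a padded request to a bin with a shift and then consults the smallmap bitmap. A sketch of the indexing, assuming stock dlmalloc's 8-byte bin spacing (the real macros are configuration dependent):

#include <stddef.h>

static unsigned small_bin_index(size_t padded_request)
{
    return (unsigned) (padded_request >> 3);  /* assumed 8-byte spaced bins */
}
/* (gm->smallmap >> idx) & 0x3: this bin or the next one holds a chunk */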
  1.2252  
  1.2253  void
  1.2254 -dlfree (void *mem)
  1.2255 +dlfree(void *mem)
  1.2256  {
  1.2257      /*
  1.2258      Consolidate freed chunks with preceding or succeeding bordering
  1.2259 @@ -4288,40 +4285,40 @@
  1.2260       */
  1.2261  
  1.2262      if (mem != 0) {
  1.2263 -        mchunkptr p = mem2chunk (mem);
  1.2264 +        mchunkptr p = mem2chunk(mem);
  1.2265  #if FOOTERS
  1.2266 -        mstate fm = get_mstate_for (p);
  1.2267 -        if (!ok_magic (fm)) {
  1.2268 -            USAGE_ERROR_ACTION (fm, p);
  1.2269 +        mstate fm = get_mstate_for(p);
  1.2270 +        if (!ok_magic(fm)) {
  1.2271 +            USAGE_ERROR_ACTION(fm, p);
  1.2272              return;
  1.2273          }
  1.2274  #else /* FOOTERS */
  1.2275  #define fm gm
  1.2276  #endif /* FOOTERS */
  1.2277 -        if (!PREACTION (fm)) {
  1.2278 -            check_inuse_chunk (fm, p);
  1.2279 -            if (RTCHECK (ok_address (fm, p) && ok_cinuse (p))) {
  1.2280 -                size_t psize = chunksize (p);
  1.2281 -                mchunkptr next = chunk_plus_offset (p, psize);
  1.2282 -                if (!pinuse (p)) {
  1.2283 +        if (!PREACTION(fm)) {
  1.2284 +            check_inuse_chunk(fm, p);
  1.2285 +            if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  1.2286 +                size_t psize = chunksize(p);
  1.2287 +                mchunkptr next = chunk_plus_offset(p, psize);
  1.2288 +                if (!pinuse(p)) {
  1.2289                      size_t prevsize = p->prev_foot;
  1.2290                      if ((prevsize & IS_MMAPPED_BIT) != 0) {
  1.2291                          prevsize &= ~IS_MMAPPED_BIT;
  1.2292                          psize += prevsize + MMAP_FOOT_PAD;
  1.2293 -                        if (CALL_MUNMAP ((char *) p - prevsize, psize) == 0)
  1.2294 +                        if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
  1.2295                              fm->footprint -= psize;
  1.2296                          goto postaction;
  1.2297                      } else {
  1.2298 -                        mchunkptr prev = chunk_minus_offset (p, prevsize);
  1.2299 +                        mchunkptr prev = chunk_minus_offset(p, prevsize);
  1.2300                          psize += prevsize;
  1.2301                          p = prev;
  1.2302 -                        if (RTCHECK (ok_address (fm, prev))) {  /* consolidate backward */
  1.2303 +                        if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
  1.2304                              if (p != fm->dv) {
  1.2305 -                                unlink_chunk (fm, p, prevsize);
  1.2306 +                                unlink_chunk(fm, p, prevsize);
  1.2307                              } else if ((next->head & INUSE_BITS) ==
  1.2308                                         INUSE_BITS) {
  1.2309                                  fm->dvsize = psize;
  1.2310 -                                set_free_with_pinuse (p, psize, next);
  1.2311 +                                set_free_with_pinuse(p, psize, next);
  1.2312                                  goto postaction;
  1.2313                              }
  1.2314                          } else
  1.2315 @@ -4329,8 +4326,8 @@
  1.2316                      }
  1.2317                  }
  1.2318  
  1.2319 -                if (RTCHECK (ok_next (p, next) && ok_pinuse (next))) {
  1.2320 -                    if (!cinuse (next)) {       /* consolidate forward */
  1.2321 +                if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  1.2322 +                    if (!cinuse(next)) {        /* consolidate forward */
  1.2323                          if (next == fm->top) {
  1.2324                              size_t tsize = fm->topsize += psize;
  1.2325                              fm->top = p;
  1.2326 @@ -4339,35 +4336,35 @@
  1.2327                                  fm->dv = 0;
  1.2328                                  fm->dvsize = 0;
  1.2329                              }
  1.2330 -                            if (should_trim (fm, tsize))
  1.2331 -                                sys_trim (fm, 0);
  1.2332 +                            if (should_trim(fm, tsize))
  1.2333 +                                sys_trim(fm, 0);
  1.2334                              goto postaction;
  1.2335                          } else if (next == fm->dv) {
  1.2336                              size_t dsize = fm->dvsize += psize;
  1.2337                              fm->dv = p;
  1.2338 -                            set_size_and_pinuse_of_free_chunk (p, dsize);
  1.2339 +                            set_size_and_pinuse_of_free_chunk(p, dsize);
  1.2340                              goto postaction;
  1.2341                          } else {
  1.2342 -                            size_t nsize = chunksize (next);
  1.2343 +                            size_t nsize = chunksize(next);
  1.2344                              psize += nsize;
  1.2345 -                            unlink_chunk (fm, next, nsize);
  1.2346 -                            set_size_and_pinuse_of_free_chunk (p, psize);
  1.2347 +                            unlink_chunk(fm, next, nsize);
  1.2348 +                            set_size_and_pinuse_of_free_chunk(p, psize);
  1.2349                              if (p == fm->dv) {
  1.2350                                  fm->dvsize = psize;
  1.2351                                  goto postaction;
  1.2352                              }
  1.2353                          }
  1.2354                      } else
  1.2355 -                        set_free_with_pinuse (p, psize, next);
  1.2356 -                    insert_chunk (fm, p, psize);
  1.2357 -                    check_free_chunk (fm, p);
  1.2358 +                        set_free_with_pinuse(p, psize, next);
  1.2359 +                    insert_chunk(fm, p, psize);
  1.2360 +                    check_free_chunk(fm, p);
  1.2361                      goto postaction;
  1.2362                  }
  1.2363              }
  1.2364            erroraction:
  1.2365 -            USAGE_ERROR_ACTION (fm, p);
  1.2366 +            USAGE_ERROR_ACTION(fm, p);
  1.2367            postaction:
  1.2368 -            POSTACTION (fm);
  1.2369 +            POSTACTION(fm);
  1.2370          }
  1.2371      }
  1.2372  #if !FOOTERS
  1.2373 @@ -4376,7 +4373,7 @@
  1.2374  }
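dlfree's backward/forward consolidation guarantees that no two free chunks are ever adjacent. A schematic of the merging decision over a hypothetical simplified chunk type:

#include <stddef.h>

struct toy_chunk { size_t size; int in_use; };

static size_t coalesced_size(const struct toy_chunk *prev,
                             const struct toy_chunk *self,
                             const struct toy_chunk *next)
{
    size_t total = self->size;
    if (prev != NULL && !prev->in_use)
        total += prev->size;  /* consolidate backward */
    if (next != NULL && !next->in_use)
        total += next->size;  /* consolidate forward */
    return total;
}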
  1.2375  
  1.2376  void *
  1.2377 -dlcalloc (size_t n_elements, size_t elem_size)
  1.2378 +dlcalloc(size_t n_elements, size_t elem_size)
  1.2379  {
  1.2380      void *mem;
  1.2381      size_t req = 0;
  1.2382 @@ -4386,20 +4383,20 @@
  1.2383              (req / n_elements != elem_size))
  1.2384              req = MAX_SIZE_T;   /* force downstream failure on overflow */
  1.2385      }
  1.2386 -    mem = dlmalloc (req);
  1.2387 -    if (mem != 0 && calloc_must_clear (mem2chunk (mem)))
  1.2388 -        memset (mem, 0, req);
  1.2389 +    mem = dlmalloc(req);
  1.2390 +    if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  1.2391 +        memset(mem, 0, req);
  1.2392      return mem;
  1.2393  }
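The guard in dlcalloc above is the standard multiplication-overflow check; isolated:

#include <stddef.h>

static int mul_would_overflow(size_t n, size_t size)
{
    size_t req = n * size;
    return n != 0 && req / n != size;  /* nonzero if n * size wrapped */
}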
  1.2394  
  1.2395  void *
  1.2396 -dlrealloc (void *oldmem, size_t bytes)
  1.2397 +dlrealloc(void *oldmem, size_t bytes)
  1.2398  {
  1.2399      if (oldmem == 0)
  1.2400 -        return dlmalloc (bytes);
  1.2401 +        return dlmalloc(bytes);
  1.2402  #ifdef REALLOC_ZERO_BYTES_FREES
  1.2403      if (bytes == 0) {
  1.2404 -        dlfree (oldmem);
  1.2405 +        dlfree(oldmem);
  1.2406          return 0;
  1.2407      }
  1.2408  #endif /* REALLOC_ZERO_BYTES_FREES */
  1.2409 @@ -4407,107 +4404,106 @@
  1.2410  #if ! FOOTERS
  1.2411          mstate m = gm;
  1.2412  #else /* FOOTERS */
  1.2413 -        mstate m = get_mstate_for (mem2chunk (oldmem));
  1.2414 -        if (!ok_magic (m)) {
  1.2415 -            USAGE_ERROR_ACTION (m, oldmem);
  1.2416 +        mstate m = get_mstate_for(mem2chunk(oldmem));
  1.2417 +        if (!ok_magic(m)) {
  1.2418 +            USAGE_ERROR_ACTION(m, oldmem);
  1.2419              return 0;
  1.2420          }
  1.2421  #endif /* FOOTERS */
  1.2422 -        return internal_realloc (m, oldmem, bytes);
  1.2423 +        return internal_realloc(m, oldmem, bytes);
  1.2424      }
  1.2425  }
  1.2426  
  1.2427  void *
  1.2428 -dlmemalign (size_t alignment, size_t bytes)
  1.2429 +dlmemalign(size_t alignment, size_t bytes)
  1.2430  {
  1.2431 -    return internal_memalign (gm, alignment, bytes);
  1.2432 +    return internal_memalign(gm, alignment, bytes);
  1.2433  }
  1.2434  
  1.2435  void **
  1.2436 -dlindependent_calloc (size_t n_elements, size_t elem_size, void *chunks[])
  1.2437 +dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
  1.2438  {
  1.2439      size_t sz = elem_size;      /* serves as 1-element array */
  1.2440 -    return ialloc (gm, n_elements, &sz, 3, chunks);
  1.2441 +    return ialloc(gm, n_elements, &sz, 3, chunks);
  1.2442  }
  1.2443  
  1.2444  void **
  1.2445 -dlindependent_comalloc (size_t n_elements, size_t sizes[], void *chunks[])
  1.2446 +dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
  1.2447  {
  1.2448 -    return ialloc (gm, n_elements, sizes, 0, chunks);
  1.2449 +    return ialloc(gm, n_elements, sizes, 0, chunks);
  1.2450  }
  1.2451  
  1.2452  void *
  1.2453 -dlvalloc (size_t bytes)
  1.2454 +dlvalloc(size_t bytes)
  1.2455  {
  1.2456      size_t pagesz;
  1.2457 -    init_mparams ();
  1.2458 +    init_mparams();
  1.2459      pagesz = mparams.page_size;
  1.2460 -    return dlmemalign (pagesz, bytes);
  1.2461 +    return dlmemalign(pagesz, bytes);
  1.2462  }
  1.2463  
  1.2464  void *
  1.2465 -dlpvalloc (size_t bytes)
  1.2466 +dlpvalloc(size_t bytes)
  1.2467  {
  1.2468      size_t pagesz;
  1.2469 -    init_mparams ();
  1.2470 +    init_mparams();
  1.2471      pagesz = mparams.page_size;
  1.2472 -    return dlmemalign (pagesz,
  1.2473 -                       (bytes + pagesz - SIZE_T_ONE) & ~(pagesz -
  1.2474 -                                                         SIZE_T_ONE));
  1.2475 +    return dlmemalign(pagesz,
  1.2476 +                      (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
  1.2477  }
  1.2478  
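The masking expression in dlpvalloc is the usual power-of-two round-up.
A worked sketch using the 4096-byte fallback page size (real values come
from init_mparams; round_up_to_page is an illustrative name):

    #include <stddef.h>

    static size_t round_up_to_page(size_t bytes, size_t pagesz /* power of 2 */)
    {
        return (bytes + pagesz - 1) & ~(pagesz - 1);
    }

    /* e.g. round_up_to_page(5000, 4096) == 8192, so dlpvalloc returns a
       block spanning two complete pages. */
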
  1.2479  int
  1.2480 -dlmalloc_trim (size_t pad)
  1.2481 +dlmalloc_trim(size_t pad)
  1.2482  {
  1.2483      int result = 0;
  1.2484 -    if (!PREACTION (gm)) {
  1.2485 -        result = sys_trim (gm, pad);
  1.2486 -        POSTACTION (gm);
  1.2487 +    if (!PREACTION(gm)) {
  1.2488 +        result = sys_trim(gm, pad);
  1.2489 +        POSTACTION(gm);
  1.2490      }
  1.2491      return result;
  1.2492  }
  1.2493  
  1.2494  size_t
  1.2495 -dlmalloc_footprint (void)
  1.2496 +dlmalloc_footprint(void)
  1.2497  {
  1.2498      return gm->footprint;
  1.2499  }
  1.2500  
  1.2501  size_t
  1.2502 -dlmalloc_max_footprint (void)
  1.2503 +dlmalloc_max_footprint(void)
  1.2504  {
  1.2505      return gm->max_footprint;
  1.2506  }
  1.2507  
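A hedged monitoring sketch combining dlmalloc_trim with the two footprint
readers above. Per the header comments, the readers take no locks, so the
values are best-effort snapshots:

    #include <stddef.h>

    static void footprint_sketch(void)
    {
        size_t before, after, peak;
        before = dlmalloc_footprint();   /* bytes currently held from the system */
        dlmalloc_trim(0);                /* release as much unused top space as possible */
        after = dlmalloc_footprint();    /* <= before if anything was returned */
        peak = dlmalloc_max_footprint(); /* high-water mark; never decreases */
        (void) before; (void) after; (void) peak;
    }
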
  1.2508  #if !NO_MALLINFO
  1.2509  struct mallinfo
  1.2510 -dlmallinfo (void)
  1.2511 +dlmallinfo(void)
  1.2512  {
  1.2513 -    return internal_mallinfo (gm);
  1.2514 +    return internal_mallinfo(gm);
  1.2515  }
  1.2516  #endif /* NO_MALLINFO */
  1.2517  
  1.2518  void
  1.2519 -dlmalloc_stats ()
  1.2520 +dlmalloc_stats(void)
  1.2521  {
  1.2522 -    internal_malloc_stats (gm);
  1.2523 +    internal_malloc_stats(gm);
  1.2524  }
  1.2525  
  1.2526  size_t
  1.2527 -dlmalloc_usable_size (void *mem)
  1.2528 +dlmalloc_usable_size(void *mem)
  1.2529  {
  1.2530      if (mem != 0) {
  1.2531 -        mchunkptr p = mem2chunk (mem);
  1.2532 -        if (cinuse (p))
  1.2533 -            return chunksize (p) - overhead_for (p);
  1.2534 +        mchunkptr p = mem2chunk(mem);
  1.2535 +        if (cinuse(p))
  1.2536 +            return chunksize(p) - overhead_for(p);
  1.2537      }
  1.2538      return 0;
  1.2539  }
  1.2540  
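Because requests are padded up to chunk granularity, the usable size of a
block can exceed what was asked for. A hedged sketch:

    #include <stddef.h>

    static void usable_size_sketch(void)
    {
        void *p = dlmalloc(10);
        if (p != NULL) {
            size_t n = dlmalloc_usable_size(p); /* >= 10; the padding is usable */
            (void) n;
            dlfree(p);
        }
    }
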
  1.2541  int
  1.2542 -dlmallopt (int param_number, int value)
  1.2543 +dlmallopt(int param_number, int value)
  1.2544  {
  1.2545 -    return change_mparam (param_number, value);
  1.2546 +    return change_mparam(param_number, value);
  1.2547  }
  1.2548  
  1.2549  #endif /* !ONLY_MSPACES */
  1.2550 @@ -4517,71 +4513,70 @@
  1.2551  #if MSPACES
  1.2552  
  1.2553  static mstate
  1.2554 -init_user_mstate (char *tbase, size_t tsize)
  1.2555 +init_user_mstate(char *tbase, size_t tsize)
  1.2556  {
  1.2557 -    size_t msize = pad_request (sizeof (struct malloc_state));
  1.2558 +    size_t msize = pad_request(sizeof(struct malloc_state));
  1.2559      mchunkptr mn;
  1.2560 -    mchunkptr msp = align_as_chunk (tbase);
  1.2561 -    mstate m = (mstate) (chunk2mem (msp));
  1.2562 -    memset (m, 0, msize);
  1.2563 -    INITIAL_LOCK (&m->mutex);
  1.2564 +    mchunkptr msp = align_as_chunk(tbase);
  1.2565 +    mstate m = (mstate) (chunk2mem(msp));
  1.2566 +    memset(m, 0, msize);
  1.2567 +    INITIAL_LOCK(&m->mutex);
  1.2568      msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
  1.2569      m->seg.base = m->least_addr = tbase;
  1.2570      m->seg.size = m->footprint = m->max_footprint = tsize;
  1.2571      m->magic = mparams.magic;
  1.2572      m->mflags = mparams.default_mflags;
  1.2573 -    disable_contiguous (m);
  1.2574 -    init_bins (m);
  1.2575 -    mn = next_chunk (mem2chunk (m));
  1.2576 -    init_top (m, mn,
  1.2577 -              (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
  1.2578 -    check_top_chunk (m, m->top);
  1.2579 +    disable_contiguous(m);
  1.2580 +    init_bins(m);
  1.2581 +    mn = next_chunk(mem2chunk(m));
  1.2582 +    init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
  1.2583 +    check_top_chunk(m, m->top);
  1.2584      return m;
  1.2585  }
  1.2586  
  1.2587  mspace
  1.2588 -create_mspace (size_t capacity, int locked)
  1.2589 +create_mspace(size_t capacity, int locked)
  1.2590  {
  1.2591      mstate m = 0;
  1.2592 -    size_t msize = pad_request (sizeof (struct malloc_state));
  1.2593 -    init_mparams ();            /* Ensure pagesize etc initialized */
  1.2594 +    size_t msize = pad_request(sizeof(struct malloc_state));
  1.2595 +    init_mparams();             /* Ensure pagesize etc initialized */
  1.2596  
  1.2597      if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  1.2598          size_t rs = ((capacity == 0) ? mparams.granularity :
  1.2599                       (capacity + TOP_FOOT_SIZE + msize));
  1.2600 -        size_t tsize = granularity_align (rs);
  1.2601 -        char *tbase = (char *) (CALL_MMAP (tsize));
  1.2602 +        size_t tsize = granularity_align(rs);
  1.2603 +        char *tbase = (char *) (CALL_MMAP(tsize));
  1.2604          if (tbase != CMFAIL) {
  1.2605 -            m = init_user_mstate (tbase, tsize);
  1.2606 +            m = init_user_mstate(tbase, tsize);
  1.2607              m->seg.sflags = IS_MMAPPED_BIT;
  1.2608 -            set_lock (m, locked);
  1.2609 +            set_lock(m, locked);
  1.2610          }
  1.2611      }
  1.2612      return (mspace) m;
  1.2613  }
  1.2614  
  1.2615  mspace
  1.2616 -create_mspace_with_base (void *base, size_t capacity, int locked)
  1.2617 +create_mspace_with_base(void *base, size_t capacity, int locked)
  1.2618  {
  1.2619      mstate m = 0;
  1.2620 -    size_t msize = pad_request (sizeof (struct malloc_state));
  1.2621 -    init_mparams ();            /* Ensure pagesize etc initialized */
  1.2622 +    size_t msize = pad_request(sizeof(struct malloc_state));
  1.2623 +    init_mparams();             /* Ensure pagesize etc initialized */
  1.2624  
  1.2625      if (capacity > msize + TOP_FOOT_SIZE &&
  1.2626          capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  1.2627 -        m = init_user_mstate ((char *) base, capacity);
  1.2628 +        m = init_user_mstate((char *) base, capacity);
  1.2629          m->seg.sflags = EXTERN_BIT;
  1.2630 -        set_lock (m, locked);
  1.2631 +        set_lock(m, locked);
  1.2632      }
  1.2633      return (mspace) m;
  1.2634  }
  1.2635  
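A hedged sketch for create_mspace_with_base: the arena lives inside
caller-owned storage. Because the segment is tagged EXTERN_BIT rather
than IS_MMAPPED_BIT, destroy_mspace (below) will not unmap it, so the
buffer remains the caller's to release:

    static char backing[64 * 1024];  /* illustrative size; must exceed
                                        msize + TOP_FOOT_SIZE */

    static mspace based_arena_sketch(void)
    {
        /* final 0 = unlocked; pass nonzero to serialize access */
        return create_mspace_with_base(backing, sizeof(backing), 0);
    }
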
  1.2636  size_t
  1.2637 -destroy_mspace (mspace msp)
  1.2638 +destroy_mspace(mspace msp)
  1.2639  {
  1.2640      size_t freed = 0;
  1.2641      mstate ms = (mstate) msp;
  1.2642 -    if (ok_magic (ms)) {
  1.2643 +    if (ok_magic(ms)) {
  1.2644          msegmentptr sp = &ms->seg;
  1.2645          while (sp != 0) {
  1.2646              char *base = sp->base;
  1.2647 @@ -4589,11 +4584,11 @@
  1.2648              flag_t flag = sp->sflags;
  1.2649              sp = sp->next;
  1.2650              if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
  1.2651 -                CALL_MUNMAP (base, size) == 0)
  1.2652 +                CALL_MUNMAP(base, size) == 0)
  1.2653                  freed += size;
  1.2654          }
  1.2655      } else {
  1.2656 -        USAGE_ERROR_ACTION (ms, ms);
  1.2657 +        USAGE_ERROR_ACTION(ms, ms);
  1.2658      }
  1.2659      return freed;
  1.2660  }
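
Taken together, the three entry points above form the private-arena
lifecycle. A hedged sketch (capacity 0 requests the default granularity):

    static void mspace_lifecycle_sketch(void)
    {
        mspace arena = create_mspace(0, 0);  /* default-sized, unlocked */
        if (arena != 0) {
            void *buf = mspace_malloc(arena, 256);
            if (buf != 0)
                mspace_free(arena, buf);
            /* unmaps every mmapped segment; returns bytes given back */
            destroy_mspace(arena);
        }
    }
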
  1.2661 @@ -4605,33 +4600,33 @@
  1.2662  
  1.2663  
  1.2664  void *
  1.2665 -mspace_malloc (mspace msp, size_t bytes)
  1.2666 +mspace_malloc(mspace msp, size_t bytes)
  1.2667  {
  1.2668      mstate ms = (mstate) msp;
  1.2669 -    if (!ok_magic (ms)) {
  1.2670 -        USAGE_ERROR_ACTION (ms, ms);
  1.2671 +    if (!ok_magic(ms)) {
  1.2672 +        USAGE_ERROR_ACTION(ms, ms);
  1.2673          return 0;
  1.2674      }
  1.2675 -    if (!PREACTION (ms)) {
  1.2676 +    if (!PREACTION(ms)) {
  1.2677          void *mem;
  1.2678          size_t nb;
  1.2679          if (bytes <= MAX_SMALL_REQUEST) {
  1.2680              bindex_t idx;
  1.2681              binmap_t smallbits;
  1.2682 -            nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request (bytes);
  1.2683 -            idx = small_index (nb);
  1.2684 +            nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
  1.2685 +            idx = small_index(nb);
  1.2686              smallbits = ms->smallmap >> idx;
  1.2687  
  1.2688              if ((smallbits & 0x3U) != 0) {      /* Remainderless fit to a smallbin. */
  1.2689                  mchunkptr b, p;
  1.2690                  idx += ~smallbits & 1;  /* Uses next bin if idx empty */
  1.2691 -                b = smallbin_at (ms, idx);
  1.2692 +                b = smallbin_at(ms, idx);
  1.2693                  p = b->fd;
  1.2694 -                assert (chunksize (p) == small_index2size (idx));
  1.2695 -                unlink_first_small_chunk (ms, b, p, idx);
  1.2696 -                set_inuse_and_pinuse (ms, p, small_index2size (idx));
  1.2697 -                mem = chunk2mem (p);
  1.2698 -                check_malloced_chunk (ms, mem, nb);
  1.2699 +                assert(chunksize(p) == small_index2size(idx));
  1.2700 +                unlink_first_small_chunk(ms, b, p, idx);
  1.2701 +                set_inuse_and_pinuse(ms, p, small_index2size(idx));
  1.2702 +                mem = chunk2mem(p);
  1.2703 +                check_malloced_chunk(ms, mem, nb);
  1.2704                  goto postaction;
  1.2705              }
  1.2706  
  1.2707 @@ -4641,40 +4636,40 @@
  1.2708                      size_t rsize;
  1.2709                      bindex_t i;
  1.2710                      binmap_t leftbits =
  1.2711 -                        (smallbits << idx) & left_bits (idx2bit (idx));
  1.2712 -                    binmap_t leastbit = least_bit (leftbits);
  1.2713 -                    compute_bit2idx (leastbit, i);
  1.2714 -                    b = smallbin_at (ms, i);
  1.2715 +                        (smallbits << idx) & left_bits(idx2bit(idx));
  1.2716 +                    binmap_t leastbit = least_bit(leftbits);
  1.2717 +                    compute_bit2idx(leastbit, i);
  1.2718 +                    b = smallbin_at(ms, i);
  1.2719                      p = b->fd;
  1.2720 -                    assert (chunksize (p) == small_index2size (i));
  1.2721 -                    unlink_first_small_chunk (ms, b, p, i);
  1.2722 -                    rsize = small_index2size (i) - nb;
  1.2723 +                    assert(chunksize(p) == small_index2size(i));
  1.2724 +                    unlink_first_small_chunk(ms, b, p, i);
  1.2725 +                    rsize = small_index2size(i) - nb;
  1.2726                      /* Fit here cannot be remainderless with 4-byte sizes */
  1.2727                      if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  1.2728 -                        set_inuse_and_pinuse (ms, p, small_index2size (i));
  1.2729 +                        set_inuse_and_pinuse(ms, p, small_index2size(i));
  1.2730                      else {
  1.2731 -                        set_size_and_pinuse_of_inuse_chunk (ms, p, nb);
  1.2732 -                        r = chunk_plus_offset (p, nb);
  1.2733 -                        set_size_and_pinuse_of_free_chunk (r, rsize);
  1.2734 -                        replace_dv (ms, r, rsize);
  1.2735 +                        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  1.2736 +                        r = chunk_plus_offset(p, nb);
  1.2737 +                        set_size_and_pinuse_of_free_chunk(r, rsize);
  1.2738 +                        replace_dv(ms, r, rsize);
  1.2739                      }
  1.2740 -                    mem = chunk2mem (p);
  1.2741 -                    check_malloced_chunk (ms, mem, nb);
  1.2742 +                    mem = chunk2mem(p);
  1.2743 +                    check_malloced_chunk(ms, mem, nb);
  1.2744                      goto postaction;
  1.2745                  }
  1.2746  
  1.2747                  else if (ms->treemap != 0
  1.2748 -                         && (mem = tmalloc_small (ms, nb)) != 0) {
  1.2749 -                    check_malloced_chunk (ms, mem, nb);
  1.2750 +                         && (mem = tmalloc_small(ms, nb)) != 0) {
  1.2751 +                    check_malloced_chunk(ms, mem, nb);
  1.2752                      goto postaction;
  1.2753                  }
  1.2754              }
  1.2755          } else if (bytes >= MAX_REQUEST)
  1.2756              nb = MAX_SIZE_T;    /* Too big to allocate. Force failure (in sys alloc) */
  1.2757          else {
  1.2758 -            nb = pad_request (bytes);
  1.2759 -            if (ms->treemap != 0 && (mem = tmalloc_large (ms, nb)) != 0) {
  1.2760 -                check_malloced_chunk (ms, mem, nb);
  1.2761 +            nb = pad_request(bytes);
  1.2762 +            if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
  1.2763 +                check_malloced_chunk(ms, mem, nb);
  1.2764                  goto postaction;
  1.2765              }
  1.2766          }
  1.2767 @@ -4683,37 +4678,37 @@
  1.2768              size_t rsize = ms->dvsize - nb;
  1.2769              mchunkptr p = ms->dv;
  1.2770              if (rsize >= MIN_CHUNK_SIZE) {      /* split dv */
  1.2771 -                mchunkptr r = ms->dv = chunk_plus_offset (p, nb);
  1.2772 +                mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
  1.2773                  ms->dvsize = rsize;
  1.2774 -                set_size_and_pinuse_of_free_chunk (r, rsize);
  1.2775 -                set_size_and_pinuse_of_inuse_chunk (ms, p, nb);
  1.2776 +                set_size_and_pinuse_of_free_chunk(r, rsize);
  1.2777 +                set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  1.2778              } else {            /* exhaust dv */
  1.2779                  size_t dvs = ms->dvsize;
  1.2780                  ms->dvsize = 0;
  1.2781                  ms->dv = 0;
  1.2782 -                set_inuse_and_pinuse (ms, p, dvs);
  1.2783 +                set_inuse_and_pinuse(ms, p, dvs);
  1.2784              }
  1.2785 -            mem = chunk2mem (p);
  1.2786 -            check_malloced_chunk (ms, mem, nb);
  1.2787 +            mem = chunk2mem(p);
  1.2788 +            check_malloced_chunk(ms, mem, nb);
  1.2789              goto postaction;
  1.2790          }
  1.2791  
  1.2792          else if (nb < ms->topsize) {    /* Split top */
  1.2793              size_t rsize = ms->topsize -= nb;
  1.2794              mchunkptr p = ms->top;
  1.2795 -            mchunkptr r = ms->top = chunk_plus_offset (p, nb);
  1.2796 +            mchunkptr r = ms->top = chunk_plus_offset(p, nb);
  1.2797              r->head = rsize | PINUSE_BIT;
  1.2798 -            set_size_and_pinuse_of_inuse_chunk (ms, p, nb);
  1.2799 -            mem = chunk2mem (p);
  1.2800 -            check_top_chunk (ms, ms->top);
  1.2801 -            check_malloced_chunk (ms, mem, nb);
  1.2802 +            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
  1.2803 +            mem = chunk2mem(p);
  1.2804 +            check_top_chunk(ms, ms->top);
  1.2805 +            check_malloced_chunk(ms, mem, nb);
  1.2806              goto postaction;
  1.2807          }
  1.2808  
  1.2809 -        mem = sys_alloc (ms, nb);
  1.2810 +        mem = sys_alloc(ms, nb);
  1.2811  
  1.2812        postaction:
  1.2813 -        POSTACTION (ms);
  1.2814 +        POSTACTION(ms);
  1.2815          return mem;
  1.2816      }
  1.2817  
  1.2818 @@ -4721,43 +4716,43 @@
  1.2819  }
  1.2820  
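The small-request path above pads the request and maps it to a smallbin
index. A hedged arithmetic sketch assuming a typical 32-bit build with
8-byte alignment, 4-byte chunk overhead, and a smallbin shift of 3 (the
real constants are platform- and configuration-dependent):

    #include <stddef.h>

    /* illustrative stand-ins for this file's pad_request/small_index */
    #define SK_OVERHEAD   ((size_t) 4)
    #define SK_ALIGN_MASK ((size_t) 7)
    #define SK_SHIFT      3

    static size_t sk_pad_request(size_t req)
    {
        return (req + SK_OVERHEAD + SK_ALIGN_MASK) & ~SK_ALIGN_MASK;
    }

    static unsigned sk_small_index(size_t padded)
    {
        return (unsigned) (padded >> SK_SHIFT);
    }

    /* A 20-byte request pads to sk_pad_request(20) == 24 and is served
       from smallbin sk_small_index(24) == 3. The (smallbits & 0x3U) test
       then checks that bin and the next one, and idx += ~smallbits & 1
       selects the neighbor when the exact bin is empty. */
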
  1.2821  void
  1.2822 -mspace_free (mspace msp, void *mem)
  1.2823 +mspace_free(mspace msp, void *mem)
  1.2824  {
  1.2825      if (mem != 0) {
  1.2826 -        mchunkptr p = mem2chunk (mem);
  1.2827 +        mchunkptr p = mem2chunk(mem);
  1.2828  #if FOOTERS
  1.2829 -        mstate fm = get_mstate_for (p);
  1.2830 +        mstate fm = get_mstate_for(p);
  1.2831  #else /* FOOTERS */
  1.2832          mstate fm = (mstate) msp;
  1.2833  #endif /* FOOTERS */
  1.2834 -        if (!ok_magic (fm)) {
  1.2835 -            USAGE_ERROR_ACTION (fm, p);
  1.2836 +        if (!ok_magic(fm)) {
  1.2837 +            USAGE_ERROR_ACTION(fm, p);
  1.2838              return;
  1.2839          }
  1.2840 -        if (!PREACTION (fm)) {
  1.2841 -            check_inuse_chunk (fm, p);
  1.2842 -            if (RTCHECK (ok_address (fm, p) && ok_cinuse (p))) {
  1.2843 -                size_t psize = chunksize (p);
  1.2844 -                mchunkptr next = chunk_plus_offset (p, psize);
  1.2845 -                if (!pinuse (p)) {
  1.2846 +        if (!PREACTION(fm)) {
  1.2847 +            check_inuse_chunk(fm, p);
  1.2848 +            if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  1.2849 +                size_t psize = chunksize(p);
  1.2850 +                mchunkptr next = chunk_plus_offset(p, psize);
  1.2851 +                if (!pinuse(p)) {
  1.2852                      size_t prevsize = p->prev_foot;
  1.2853                      if ((prevsize & IS_MMAPPED_BIT) != 0) {
  1.2854                          prevsize &= ~IS_MMAPPED_BIT;
  1.2855                          psize += prevsize + MMAP_FOOT_PAD;
  1.2856 -                        if (CALL_MUNMAP ((char *) p - prevsize, psize) == 0)
  1.2857 +                        if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
  1.2858                              fm->footprint -= psize;
  1.2859                          goto postaction;
  1.2860                      } else {
  1.2861 -                        mchunkptr prev = chunk_minus_offset (p, prevsize);
  1.2862 +                        mchunkptr prev = chunk_minus_offset(p, prevsize);
  1.2863                          psize += prevsize;
  1.2864                          p = prev;
  1.2865 -                        if (RTCHECK (ok_address (fm, prev))) {  /* consolidate backward */
  1.2866 +                        if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
  1.2867                              if (p != fm->dv) {
  1.2868 -                                unlink_chunk (fm, p, prevsize);
  1.2869 +                                unlink_chunk(fm, p, prevsize);
  1.2870                              } else if ((next->head & INUSE_BITS) ==
  1.2871                                         INUSE_BITS) {
  1.2872                                  fm->dvsize = psize;
  1.2873 -                                set_free_with_pinuse (p, psize, next);
  1.2874 +                                set_free_with_pinuse(p, psize, next);
  1.2875                                  goto postaction;
  1.2876                              }
  1.2877                          } else
  1.2878 @@ -4765,8 +4760,8 @@
  1.2879                      }
  1.2880                  }
  1.2881  
  1.2882 -                if (RTCHECK (ok_next (p, next) && ok_pinuse (next))) {
  1.2883 -                    if (!cinuse (next)) {       /* consolidate forward */
  1.2884 +                if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
  1.2885 +                    if (!cinuse(next)) {        /* consolidate forward */
  1.2886                          if (next == fm->top) {
  1.2887                              size_t tsize = fm->topsize += psize;
  1.2888                              fm->top = p;
  1.2889 @@ -4775,47 +4770,47 @@
  1.2890                                  fm->dv = 0;
  1.2891                                  fm->dvsize = 0;
  1.2892                              }
  1.2893 -                            if (should_trim (fm, tsize))
  1.2894 -                                sys_trim (fm, 0);
  1.2895 +                            if (should_trim(fm, tsize))
  1.2896 +                                sys_trim(fm, 0);
  1.2897                              goto postaction;
  1.2898                          } else if (next == fm->dv) {
  1.2899                              size_t dsize = fm->dvsize += psize;
  1.2900                              fm->dv = p;
  1.2901 -                            set_size_and_pinuse_of_free_chunk (p, dsize);
  1.2902 +                            set_size_and_pinuse_of_free_chunk(p, dsize);
  1.2903                              goto postaction;
  1.2904                          } else {
  1.2905 -                            size_t nsize = chunksize (next);
  1.2906 +                            size_t nsize = chunksize(next);
  1.2907                              psize += nsize;
  1.2908 -                            unlink_chunk (fm, next, nsize);
  1.2909 -                            set_size_and_pinuse_of_free_chunk (p, psize);
  1.2910 +                            unlink_chunk(fm, next, nsize);
  1.2911 +                            set_size_and_pinuse_of_free_chunk(p, psize);
  1.2912                              if (p == fm->dv) {
  1.2913                                  fm->dvsize = psize;
  1.2914                                  goto postaction;
  1.2915                              }
  1.2916                          }
  1.2917                      } else
  1.2918 -                        set_free_with_pinuse (p, psize, next);
  1.2919 -                    insert_chunk (fm, p, psize);
  1.2920 -                    check_free_chunk (fm, p);
  1.2921 +                        set_free_with_pinuse(p, psize, next);
  1.2922 +                    insert_chunk(fm, p, psize);
  1.2923 +                    check_free_chunk(fm, p);
  1.2924                      goto postaction;
  1.2925                  }
  1.2926              }
  1.2927            erroraction:
  1.2928 -            USAGE_ERROR_ACTION (fm, p);
  1.2929 +            USAGE_ERROR_ACTION(fm, p);
  1.2930            postaction:
  1.2931 -            POSTACTION (fm);
  1.2932 +            POSTACTION(fm);
  1.2933          }
  1.2934      }
  1.2935  }
  1.2936  
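As the #if FOOTERS block above shows, only a FOOTERS build can recover
the owning mstate from the chunk itself; otherwise the msp argument is
trusted as-is. A hedged sketch of the pairing a !FOOTERS build requires:

    static void free_pairing_sketch(void)
    {
        mspace a = create_mspace(0, 0);
        mspace b = create_mspace(0, 0);
        if (a != 0 && b != 0) {
            void *p = mspace_malloc(a, 32);
            if (p != 0)
                mspace_free(a, p);  /* must name the arena that allocated p;
                                       mspace_free(b, p) would be a usage
                                       error this build cannot detect */
        }
        if (a != 0)
            destroy_mspace(a);
        if (b != 0)
            destroy_mspace(b);
    }
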
  1.2937  void *
  1.2938 -mspace_calloc (mspace msp, size_t n_elements, size_t elem_size)
  1.2939 +mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
  1.2940  {
  1.2941      void *mem;
  1.2942      size_t req = 0;
  1.2943      mstate ms = (mstate) msp;
  1.2944 -    if (!ok_magic (ms)) {
  1.2945 -        USAGE_ERROR_ACTION (ms, ms);
  1.2946 +    if (!ok_magic(ms)) {
  1.2947 +        USAGE_ERROR_ACTION(ms, ms);
  1.2948          return 0;
  1.2949      }
  1.2950      if (n_elements != 0) {
  1.2951 @@ -4824,143 +4819,143 @@
  1.2952              (req / n_elements != elem_size))
  1.2953              req = MAX_SIZE_T;   /* force downstream failure on overflow */
  1.2954      }
  1.2955 -    mem = internal_malloc (ms, req);
  1.2956 -    if (mem != 0 && calloc_must_clear (mem2chunk (mem)))
  1.2957 -        memset (mem, 0, req);
  1.2958 +    mem = internal_malloc(ms, req);
  1.2959 +    if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  1.2960 +        memset(mem, 0, req);
  1.2961      return mem;
  1.2962  }
  1.2963  
  1.2964  void *
  1.2965 -mspace_realloc (mspace msp, void *oldmem, size_t bytes)
  1.2966 +mspace_realloc(mspace msp, void *oldmem, size_t bytes)
  1.2967  {
  1.2968      if (oldmem == 0)
  1.2969 -        return mspace_malloc (msp, bytes);
  1.2970 +        return mspace_malloc(msp, bytes);
  1.2971  #ifdef REALLOC_ZERO_BYTES_FREES
  1.2972      if (bytes == 0) {
  1.2973 -        mspace_free (msp, oldmem);
  1.2974 +        mspace_free(msp, oldmem);
  1.2975          return 0;
  1.2976      }
  1.2977  #endif /* REALLOC_ZERO_BYTES_FREES */
  1.2978      else {
  1.2979  #if FOOTERS
  1.2980 -        mchunkptr p = mem2chunk (oldmem);
  1.2981 -        mstate ms = get_mstate_for (p);
  1.2982 +        mchunkptr p = mem2chunk(oldmem);
  1.2983 +        mstate ms = get_mstate_for(p);
  1.2984  #else /* FOOTERS */
  1.2985          mstate ms = (mstate) msp;
  1.2986  #endif /* FOOTERS */
  1.2987 -        if (!ok_magic (ms)) {
  1.2988 -            USAGE_ERROR_ACTION (ms, ms);
  1.2989 +        if (!ok_magic(ms)) {
  1.2990 +            USAGE_ERROR_ACTION(ms, ms);
  1.2991              return 0;
  1.2992          }
  1.2993 -        return internal_realloc (ms, oldmem, bytes);
  1.2994 +        return internal_realloc(ms, oldmem, bytes);
  1.2995      }
  1.2996  }
  1.2997  
  1.2998  void *
  1.2999 -mspace_memalign (mspace msp, size_t alignment, size_t bytes)
  1.3000 +mspace_memalign(mspace msp, size_t alignment, size_t bytes)
  1.3001  {
  1.3002      mstate ms = (mstate) msp;
  1.3003 -    if (!ok_magic (ms)) {
  1.3004 -        USAGE_ERROR_ACTION (ms, ms);
  1.3005 +    if (!ok_magic(ms)) {
  1.3006 +        USAGE_ERROR_ACTION(ms, ms);
  1.3007          return 0;
  1.3008      }
  1.3009 -    return internal_memalign (ms, alignment, bytes);
  1.3010 +    return internal_memalign(ms, alignment, bytes);
  1.3011  }
  1.3012  
  1.3013  void **
  1.3014 -mspace_independent_calloc (mspace msp, size_t n_elements,
  1.3015 -                           size_t elem_size, void *chunks[])
  1.3016 +mspace_independent_calloc(mspace msp, size_t n_elements,
  1.3017 +                          size_t elem_size, void *chunks[])
  1.3018  {
  1.3019      size_t sz = elem_size;      /* serves as 1-element array */
  1.3020      mstate ms = (mstate) msp;
  1.3021 -    if (!ok_magic (ms)) {
  1.3022 -        USAGE_ERROR_ACTION (ms, ms);
  1.3023 +    if (!ok_magic(ms)) {
  1.3024 +        USAGE_ERROR_ACTION(ms, ms);
  1.3025          return 0;
  1.3026      }
  1.3027 -    return ialloc (ms, n_elements, &sz, 3, chunks);
  1.3028 +    return ialloc(ms, n_elements, &sz, 3, chunks);
  1.3029  }
  1.3030  
  1.3031  void **
  1.3032 -mspace_independent_comalloc (mspace msp, size_t n_elements,
  1.3033 -                             size_t sizes[], void *chunks[])
  1.3034 +mspace_independent_comalloc(mspace msp, size_t n_elements,
  1.3035 +                            size_t sizes[], void *chunks[])
  1.3036  {
  1.3037      mstate ms = (mstate) msp;
  1.3038 -    if (!ok_magic (ms)) {
  1.3039 -        USAGE_ERROR_ACTION (ms, ms);
  1.3040 +    if (!ok_magic(ms)) {
  1.3041 +        USAGE_ERROR_ACTION(ms, ms);
  1.3042          return 0;
  1.3043      }
  1.3044 -    return ialloc (ms, n_elements, sizes, 0, chunks);
  1.3045 +    return ialloc(ms, n_elements, sizes, 0, chunks);
  1.3046  }
  1.3047  
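A hedged sketch for mspace_independent_comalloc: one call yields several
blocks of differing sizes, each individually freeable in any order (a
non-null chunks argument doubles as the result array):

    #include <stddef.h>

    static void comalloc_sketch(mspace arena)
    {
        size_t sizes[3] = { 16, 48, 32 };
        void *blocks[3];
        if (mspace_independent_comalloc(arena, 3, sizes, blocks) != 0) {
            /* each element behaves like a separate mspace_malloc result */
            mspace_free(arena, blocks[1]);
            mspace_free(arena, blocks[0]);
            mspace_free(arena, blocks[2]);
        }
    }
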
  1.3048  int
  1.3049 -mspace_trim (mspace msp, size_t pad)
  1.3050 +mspace_trim(mspace msp, size_t pad)
  1.3051  {
  1.3052      int result = 0;
  1.3053      mstate ms = (mstate) msp;
  1.3054 -    if (ok_magic (ms)) {
  1.3055 -        if (!PREACTION (ms)) {
  1.3056 -            result = sys_trim (ms, pad);
  1.3057 -            POSTACTION (ms);
  1.3058 +    if (ok_magic(ms)) {
  1.3059 +        if (!PREACTION(ms)) {
  1.3060 +            result = sys_trim(ms, pad);
  1.3061 +            POSTACTION(ms);
  1.3062          }
  1.3063      } else {
  1.3064 -        USAGE_ERROR_ACTION (ms, ms);
  1.3065 +        USAGE_ERROR_ACTION(ms, ms);
  1.3066      }
  1.3067      return result;
  1.3068  }
  1.3069  
  1.3070  void
  1.3071 -mspace_malloc_stats (mspace msp)
  1.3072 +mspace_malloc_stats(mspace msp)
  1.3073  {
  1.3074      mstate ms = (mstate) msp;
  1.3075 -    if (ok_magic (ms)) {
  1.3076 -        internal_malloc_stats (ms);
  1.3077 +    if (ok_magic(ms)) {
  1.3078 +        internal_malloc_stats(ms);
  1.3079      } else {
  1.3080 -        USAGE_ERROR_ACTION (ms, ms);
  1.3081 +        USAGE_ERROR_ACTION(ms, ms);
  1.3082      }
  1.3083  }
  1.3084  
  1.3085  size_t
  1.3086 -mspace_footprint (mspace msp)
  1.3087 +mspace_footprint(mspace msp)
  1.3088  {
  1.3089 -    size_t result;
  1.3089 +    size_t result = 0;
  1.3090      mstate ms = (mstate) msp;
  1.3091 -    if (ok_magic (ms)) {
  1.3092 +    if (ok_magic(ms)) {
  1.3093          result = ms->footprint;
  1.3094 -    }
  1.3095 -    USAGE_ERROR_ACTION (ms, ms);
  1.3094 +    } else {
  1.3095 +        USAGE_ERROR_ACTION(ms, ms);
  1.3096 +    }
  1.3097      return result;
  1.3098  }
  1.3099  
  1.3100  
  1.3101  size_t
  1.3102 -mspace_max_footprint (mspace msp)
  1.3103 +mspace_max_footprint(mspace msp)
  1.3104  {
  1.3105 -    size_t result;
  1.3105 +    size_t result = 0;
  1.3106      mstate ms = (mstate) msp;
  1.3107 -    if (ok_magic (ms)) {
  1.3108 +    if (ok_magic(ms)) {
  1.3109          result = ms->max_footprint;
  1.3110 -    }
  1.3111 -    USAGE_ERROR_ACTION (ms, ms);
  1.3110 +    } else {
  1.3111 +        USAGE_ERROR_ACTION(ms, ms);
  1.3112 +    }
  1.3113      return result;
  1.3114  }
  1.3115  
  1.3116  
  1.3117  #if !NO_MALLINFO
  1.3118  struct mallinfo
  1.3119 -mspace_mallinfo (mspace msp)
  1.3120 +mspace_mallinfo(mspace msp)
  1.3121  {
  1.3122      mstate ms = (mstate) msp;
  1.3123 -    if (!ok_magic (ms)) {
  1.3124 -        USAGE_ERROR_ACTION (ms, ms);
  1.3125 +    if (!ok_magic(ms)) {
  1.3126 +        USAGE_ERROR_ACTION(ms, ms);
  1.3127      }
  1.3128 -    return internal_mallinfo (ms);
  1.3129 +    return internal_mallinfo(ms);
  1.3130  }
  1.3131  #endif /* NO_MALLINFO */
  1.3132  
  1.3133  int
  1.3134 -mspace_mallopt (int param_number, int value)
  1.3135 +mspace_mallopt(int param_number, int value)
  1.3136  {
  1.3137 -    return change_mparam (param_number, value);
  1.3138 +    return change_mparam(param_number, value);
  1.3139  }
  1.3140  
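Both dlmallopt and mspace_mallopt funnel into change_mparam, so they tune
the same global mparams. A hedged sketch using stock dlmalloc's parameter
numbering, where M_GRANULARITY is -2 and the value must be a power of two
no smaller than the page size (a nonzero return means the change took):

    static void mallopt_sketch(void)
    {
        /* illustrative: raise the system-allocation granularity to 64 KB */
        if (mspace_mallopt(-2 /* M_GRANULARITY */, 64 * 1024) != 0) {
            /* later segment allocations are requested in 64 KB units */
        }
    }
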
  1.3141  #endif /* MSPACES */