From patchwork Mon Nov 23 15:42:33 2020
From: Richard Earnshaw <rearnsha@arm.com>
To: libc-alpha@sourceware.org
Subject: [PATCH v3 5/8] malloc: support MALLOC_CHECK_ in conjunction with _MTAG_ENABLE.
Date: Mon, 23 Nov 2020 15:42:33 +0000
Message-Id: <20201123154236.25809-6-rearnsha@arm.com>
In-Reply-To: <20201123154236.25809-1-rearnsha@arm.com>
References: <20201123154236.25809-1-rearnsha@arm.com>

Note: I propose that this patch be merged into the main malloc changes
before committing; it is kept separate here to simplify reviewing.
---
 malloc/arena.c  |  16 ++++--
 malloc/hooks.c  |  85 ++++++++++++++++++++++++---------------
 malloc/malloc.c | 104 +++++++++++++++++++++++++++++-------------------
 3 files changed, 127 insertions(+), 78 deletions(-)
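For reviewers who want to poke at the idea behind SAFE_CHAR_OFFSET without an
MTE-capable target, here is a small standalone sketch (not part of the patch;
every name in it is invented for illustration).  It models tags as a colour
per 16-byte granule and shows why the checking code must re-derive the colour
currently stored at an address (roughly what TAG_AT does) before touching its
trailer bytes, since the application may have re-coloured its buffer after
malloc handed it out:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define GRANULE 16
    static unsigned char heap[256];
    static unsigned char mem_colour[sizeof heap / GRANULE];

    struct tagged_ptr { size_t off; unsigned char colour; };

    /* A checked load: a colour mismatch is the analogue of an MTE
       tag-check fault.  */
    static unsigned char load (struct tagged_ptr p)
    {
      assert (p.colour == mem_colour[p.off / GRANULE]);
      return heap[p.off];
    }

    /* Adopt whatever colour the granule holds right now, so the access
       cannot fault -- the rough idea behind TAG_AT / SAFE_CHAR_OFFSET.  */
    static struct tagged_ptr tag_at (size_t off)
    {
      return (struct tagged_ptr) { off, mem_colour[off / GRANULE] };
    }

    /* Re-colour a region, as TAG_REGION / TAG_NEW_USABLE conceptually do.  */
    static void colour_region (size_t off, size_t len, unsigned char c)
    {
      for (size_t g = off / GRANULE; g * GRANULE < off + len; g++)
        mem_colour[g] = c;
    }

    int main (void)
    {
      colour_region (0, 64, 3);      /* malloc colours the whole chunk     */
      heap[63] = 0x42;               /* checker stores its magic byte      */
      colour_region (48, 16, 7);     /* the application re-colours the end */
      /* Loading with the stale colour 3 would trip the assert; re-deriving
         the colour first keeps the MALLOC_CHECK_ bytes reachable.  */
      printf ("magic byte: 0x%x\n", load (tag_at (63)));
      return 0;
    }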
diff --git a/malloc/arena.c b/malloc/arena.c
index e348b10978..da4bdc9d51 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -290,14 +290,22 @@ libc_hidden_proto (_dl_open_hook);
 #ifdef _LIBC_MTAG
 
 /* Generate a new (random) tag value for PTR and tag the memory it
-   points to upto __malloc_usable_size (PTR).  Return the newly tagged
-   pointer.  */
+   points to upto the end of the usable size for the chunk containing
+   it.  Return the newly tagged pointer.  */
 static void *
 __mtag_tag_new_usable (void *ptr)
 {
   if (ptr)
-    ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr),
-                                  __malloc_usable_size (ptr));
+    {
+      mchunkptr cp = mem2chunk(ptr);
+      /* This likely will never happen, but we can't handle retagging
+         chunks from the dumped main arena.  So just return the
+         existing pointer.  */
+      if (DUMPED_MAIN_ARENA_CHUNK (cp))
+        return ptr;
+      ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr),
+                                    CHUNK_AVAILABLE_SIZE (cp) - CHUNK_HDR_SZ);
+    }
   return ptr;
 }
 
diff --git a/malloc/hooks.c b/malloc/hooks.c
index 52bb3863cd..c6d7ed774f 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -56,12 +56,6 @@ static int using_malloc_checking;
 void
 __malloc_check_init (void)
 {
-#if HAVE_TUNABLES && defined (_LIBC_MTAG)
-  /* If memory tagging is enabled, there is no need for the boundary
-     checking cookie as well.  */
-  if ((TUNABLE_GET_FULL (glibc, memtag, enable, int32_t, NULL) & 1) != 0)
-    return;
-#endif
   using_malloc_checking = 1;
   __malloc_hook = malloc_check;
   __free_hook = free_check;
@@ -69,6 +63,13 @@ __malloc_check_init (void)
   __memalign_hook = memalign_check;
 }
 
+/* When memory is tagged, the checking data is stored in the user part
+   of the chunk.  We can't rely on the user not having modified the
+   tags, so fetch the tag at each location before dereferencing
+   it.  */
+#define SAFE_CHAR_OFFSET(p,offset) \
+  ((unsigned char *) TAG_AT (((unsigned char *) p) + offset))
+
 /* A simple, standard set of debugging hooks.  Overhead is `only' one
    byte per chunk; still this will catch most cases of double frees or
    overruns.  The goal here is to avoid obscure crashes due to invalid
@@ -86,7 +87,6 @@ magicbyte (const void *p)
   return magic;
 }
 
-
 /* Visualize the chunk as being partitioned into blocks of 255 bytes from the
    highest address of the chunk, downwards.  The end of each block tells
    us the size of that block, up to the actual size of the requested
@@ -102,16 +102,16 @@ malloc_check_get_size (mchunkptr p)
 
   assert (using_malloc_checking == 1);
 
-  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
-       (c = ((unsigned char *) p)[size]) != magic;
+  for (size = CHUNK_AVAILABLE_SIZE (p) - 1;
+       (c = *SAFE_CHAR_OFFSET (p, size)) != magic;
        size -= c)
     {
-      if (c <= 0 || size < (c + 2 * SIZE_SZ))
+      if (c <= 0 || size < (c + CHUNK_HDR_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
     }
 
   /* chunk2mem size.  */
-  return size - 2 * SIZE_SZ;
+  return size - CHUNK_HDR_SZ;
 }
 
 /* Instrument a chunk with overrun detector byte(s) and convert it
@@ -130,9 +130,8 @@ mem2mem_check (void *ptr, size_t req_sz)
 
   p = mem2chunk (ptr);
   magic = magicbyte (p);
-  max_sz = chunksize (p) - 2 * SIZE_SZ;
-  if (!chunk_is_mmapped (p))
-    max_sz += SIZE_SZ;
+  max_sz = CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ;
+
   for (i = max_sz - 1; i > req_sz; i -= block_sz)
     {
       block_sz = MIN (i - req_sz, 0xff);
@@ -141,9 +140,9 @@ mem2mem_check (void *ptr, size_t req_sz)
       if (block_sz == magic)
        --block_sz;
 
-      m_ptr[i] = block_sz;
+      *SAFE_CHAR_OFFSET (m_ptr, i) = block_sz;
     }
-  m_ptr[req_sz] = magic;
+  *SAFE_CHAR_OFFSET (m_ptr, req_sz) = magic;
   return (void *) m_ptr;
 }
 
@@ -176,9 +175,11 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;
 
-      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
+      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
+           (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
+           sz -= c)
        {
-          if (c == 0 || sz < (c + 2 * SIZE_SZ))
+          if (c == 0 || sz < (c + CHUNK_HDR_SZ))
            return NULL;
        }
     }
@@ -199,15 +200,19 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;
 
-      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
+      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
+           (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
+           sz -= c)
        {
-          if (c == 0 || sz < (c + 2 * SIZE_SZ))
+          if (c == 0 || sz < (c + CHUNK_HDR_SZ))
            return NULL;
        }
     }
-  ((unsigned char *) p)[sz] ^= 0xFF;
+
+  unsigned char* safe_p = SAFE_CHAR_OFFSET (p, sz);
+  *safe_p ^= 0xFF;
   if (magic_p)
-    *magic_p = (unsigned char *) p + sz;
+    *magic_p = safe_p;
   return p;
 }
 
@@ -244,7 +249,7 @@ malloc_check (size_t sz, const void *caller)
   top_check ();
   victim = _int_malloc (&main_arena, nb);
   __libc_lock_unlock (main_arena.mutex);
-  return mem2mem_check (victim, sz);
+  return mem2mem_check (TAG_NEW_USABLE (victim), sz);
 }
 
 static void
@@ -255,6 +260,12 @@ free_check (void *mem, const void *caller)
   if (!mem)
     return;
 
+#ifdef _LIBC_MTAG
+  /* Quickly check that the freed pointer matches the tag for the memory.
+     This gives a useful double-free detection.  */
+  *(volatile char *)mem;
+#endif
+
   __libc_lock_lock (main_arena.mutex);
   p = mem2chunk_check (mem, NULL);
   if (!p)
@@ -265,6 +276,8 @@ free_check (void *mem, const void *caller)
       munmap_chunk (p);
       return;
     }
+  /* Mark the chunk as belonging to the library again.  */
+  (void)TAG_REGION (chunk2rawmem (p), CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
   _int_free (&main_arena, p, 1);
   __libc_lock_unlock (main_arena.mutex);
 }
@@ -272,7 +285,7 @@ free_check (void *mem, const void *caller)
 static void *
 realloc_check (void *oldmem, size_t bytes, const void *caller)
 {
-  INTERNAL_SIZE_T nb;
+  INTERNAL_SIZE_T chnb;
   void *newmem = 0;
   unsigned char *magic_p;
   size_t rb;
@@ -290,14 +303,21 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
       free_check (oldmem, NULL);
       return NULL;
     }
+
+#ifdef _LIBC_MTAG
+  /* Quickly check that the freed pointer matches the tag for the memory.
+     This gives a useful double-free detection.  */
+  *(volatile char *)oldmem;
+#endif
+
   __libc_lock_lock (main_arena.mutex);
   const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
   __libc_lock_unlock (main_arena.mutex);
   if (!oldp)
     malloc_printerr ("realloc(): invalid pointer");
-  const INTERNAL_SIZE_T oldsize = chunksize (oldp);
+  const INTERNAL_SIZE_T oldchsize = CHUNK_AVAILABLE_SIZE (oldp);
 
-  if (!checked_request2size (rb, &nb))
+  if (!checked_request2size (rb, &chnb))
     goto invert;
 
   __libc_lock_lock (main_arena.mutex);
@@ -305,14 +325,13 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
   if (chunk_is_mmapped (oldp))
     {
 #if HAVE_MREMAP
-      mchunkptr newp = mremap_chunk (oldp, nb);
+      mchunkptr newp = mremap_chunk (oldp, chnb);
       if (newp)
         newmem = chunk2mem (newp);
       else
 #endif
       {
-         /* Note the extra SIZE_SZ overhead.  */
-         if (oldsize - SIZE_SZ >= nb)
+         if (oldchsize >= chnb)
           newmem = oldmem; /* do nothing */
         else
           {
@@ -321,7 +340,7 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
             newmem = _int_malloc (&main_arena, rb);
             if (newmem)
               {
-                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
+                memcpy (newmem, oldmem, oldchsize - CHUNK_HDR_SZ);
                 munmap_chunk (oldp);
               }
           }
@@ -330,7 +349,7 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
   else
     {
       top_check ();
-      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
+      newmem = _int_realloc (&main_arena, oldp, oldchsize, chnb);
     }
 
   DIAG_PUSH_NEEDS_COMMENT;
@@ -349,7 +368,7 @@ invert:
 
   __libc_lock_unlock (main_arena.mutex);
 
-  return mem2mem_check (newmem, bytes);
+  return mem2mem_check (TAG_NEW_USABLE (newmem), bytes);
 }
 
 static void *
@@ -391,7 +410,7 @@ memalign_check (size_t alignment, size_t bytes, const void *caller)
   top_check ();
   mem = _int_memalign (&main_arena, alignment, bytes + 1);
   __libc_lock_unlock (main_arena.mutex);
-  return mem2mem_check (mem, bytes);
+  return mem2mem_check (TAG_NEW_USABLE (mem), bytes);
 }
 
 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index deabeb010b..bd78d4bbc6 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -425,9 +425,10 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
 
    void *TAG_NEW_USABLE (void *ptr)
 
-   Allocate a new random color and use it to color the region of
-   memory starting at PTR and of size __malloc_usable_size() with that
-   color.  Returns PTR suitably recolored for accessing the memory there.
+   Allocate a new random color and use it to color the user region of
+   a chunk; this may include data from the subsequent chunk's header
+   if tagging is sufficiently fine grained.  Returns PTR suitably
+   recolored for accessing the memory there.
 
    void *TAG_AT (void *ptr)
 
@@ -1289,15 +1290,19 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    chunk2rawmem() can be used in these cases.
  */
 
+/* The chunk header is two SIZE_SZ elements, but this is used widely, so
+   we define it here for clarity later.  */
+#define CHUNK_HDR_SZ (2 * SIZE_SZ)
+
 /* Convert a user mem pointer to a chunk address without correcting
    the tag.  */
-#define chunk2rawmem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
+#define chunk2rawmem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
 
 /* Convert between user mem pointers and chunk pointers, updating any
    memory tags on the pointer to respect the tag value at that
   location.  */
-#define chunk2mem(p) ((void*)TAG_AT (((char*)(p) + 2*SIZE_SZ)))
-#define mem2chunk(mem) ((mchunkptr)TAG_AT (((char*)(mem) - 2*SIZE_SZ)))
+#define chunk2mem(p) ((void*)TAG_AT (((char*)(p) + CHUNK_HDR_SZ)))
+#define mem2chunk(mem) ((mchunkptr)TAG_AT (((char*)(mem) - CHUNK_HDR_SZ)))
 
 /* The smallest possible chunk */
 #define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))
 
@@ -1312,7 +1317,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 #define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
 
 #define misaligned_chunk(p) \
-  ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
+  ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
    & MALLOC_ALIGN_MASK)
 
 /* pad request bytes into a usable size -- internal version */
@@ -1323,6 +1328,17 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
 
+/* Available size of chunk.  This is the size of the real usable data
+   in the chunk, plus the chunk header.  */
+#ifdef _LIBC_MTAG
+#define CHUNK_AVAILABLE_SIZE(p) \
+  ((chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ)) \
+   & __mtag_granule_mask)
+#else
+#define CHUNK_AVAILABLE_SIZE(p) \
+  (chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
+#endif
+
 /* Check if REQ overflows when padded and aligned and if the resulting value
    is less than PTRDIFF_T.  Returns TRUE and the requested size or MINSIZE in
    case the value is less than MINSIZE on SZ or false if any of the previous
@@ -1442,7 +1458,6 @@ checked_request2size (size_t req, size_t *sz) __nonnull (1)
 /* Set size at footer (only when chunk is not in use) */
 #define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
 
-
 #pragma GCC poison mchunk_size
 #pragma GCC poison mchunk_prev_size
 
@@ -1538,7 +1553,7 @@ typedef struct malloc_chunk *mbinptr;
 #define NBINS             128
 #define NSMALLBINS         64
 #define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
-#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
+#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
 #define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
 
 #define in_smallbin_range(sz)  \
@@ -2438,7 +2453,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
          See the front_misalign handling below, for glibc there is no
          need for further alignments unless we have have high alignment.
        */
-      if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+      if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
       else
        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
@@ -2460,11 +2475,13 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
              address argument for later munmap in free() and realloc().
            */
 
-          if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+          if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
            {
-              /* For glibc, chunk2rawmem increases the address by 2*SIZE_SZ and
-                 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
-                 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
+              /* For glibc, chunk2rawmem increases the address by
+                 CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
+                 CHUNK_HDR_SZ-1.  Each mmap'ed area is page
+                 aligned and therefore definitely
+                 MALLOC_ALIGN_MASK-aligned.  */
              assert (((INTERNAL_SIZE_T) chunk2rawmem (mm) & MALLOC_ALIGN_MASK) == 0);
              front_misalign = 0;
            }
@@ -2557,18 +2574,20 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
             become the top chunk again later.  Note that a footer is set
             up, too, although the chunk is marked in use.  */
          old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
-          set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
+          set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
+                    0 | PREV_INUSE);
          if (old_size >= MINSIZE)
            {
-              set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
-              set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
+              set_head (chunk_at_offset (old_top, old_size),
+                        CHUNK_HDR_SZ | PREV_INUSE);
+              set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
              set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
              _int_free (av, old_top, 1);
            }
          else
            {
-              set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
-              set_foot (old_top, (old_size + 2 * SIZE_SZ));
+              set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
+              set_foot (old_top, (old_size + CHUNK_HDR_SZ));
            }
        }
      else if (!tried_mmap)
@@ -2770,7 +2789,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
       /* handle non-contiguous cases */
       else
        {
-          if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+          if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
            /* MORECORE/mmap must correctly align */
            assert (((unsigned long) chunk2rawmem (brk) & MALLOC_ALIGN_MASK) == 0);
          else
@@ -2820,7 +2839,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
             multiple of MALLOC_ALIGNMENT.  We know there is at least
             enough space in old_top to do this.
           */
-          old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
+          old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
          set_head (old_top, old_size | PREV_INUSE);
 
          /*
@@ -2830,9 +2849,10 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
             lost.
           */
          set_head (chunk_at_offset (old_top, old_size),
-                    (2 * SIZE_SZ) | PREV_INUSE);
-          set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
-                    (2 * SIZE_SZ) | PREV_INUSE);
+                    CHUNK_HDR_SZ | PREV_INUSE);
+          set_head (chunk_at_offset (old_top,
+                                     old_size + CHUNK_HDR_SZ),
+                    CHUNK_HDR_SZ | PREV_INUSE);
 
          /* If possible, release the rest. */
          if (old_size >= MINSIZE)
@@ -3259,7 +3279,7 @@ __libc_free (void *mem)
   p = mem2chunk (mem);
 
   /* Mark the chunk as belonging to the library again.  */
-  (void)TAG_REGION (chunk2rawmem (p), __malloc_usable_size (mem));
+  (void)TAG_REGION (chunk2rawmem (p), CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
 
   if (chunk_is_mmapped (p))                       /* release mmapped memory. */
     {
@@ -3358,7 +3378,7 @@ __libc_realloc (void *oldmem, size_t bytes)
            return NULL;
          /* Copy as many bytes as are available from the old chunk
             and fit into the new size.  NB: The overhead for faked
-             mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
+             mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
             regular mmapped chunks.  */
          if (bytes > oldsize - SIZE_SZ)
            bytes = oldsize - SIZE_SZ;
@@ -3390,7 +3410,7 @@ __libc_realloc (void *oldmem, size_t bytes)
       if (newmem == 0)
        return 0;              /* propagate failure */
 
-      memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
+      memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
       munmap_chunk (oldp);
       return newmem;
     }
@@ -3629,13 +3649,13 @@ __libc_calloc (size_t n, size_t elem_size)
   if (mem == 0)
     return 0;
 
+  mchunkptr p = mem2chunk (mem);
  /* If we are using memory tagging, then we need to set the tags
     regardless of MORECORE_CLEARS, so we zero the whole block while
     doing so.  */
 #ifdef _LIBC_MTAG
-  return TAG_NEW_MEMSET (mem, 0, __malloc_usable_size (mem));
+  return TAG_NEW_MEMSET (mem, 0, CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
 #else
-  mchunkptr p = mem2chunk (mem);
   INTERNAL_SIZE_T csz = chunksize (p);
 
   /* Two optional cases in which clearing not necessary */
@@ -3927,10 +3947,10 @@ _int_malloc (mstate av, size_t bytes)
           size = chunksize (victim);
           mchunkptr next = chunk_at_offset (victim, size);
 
-          if (__glibc_unlikely (size <= 2 * SIZE_SZ)
+          if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
               || __glibc_unlikely (size > av->system_mem))
             malloc_printerr ("malloc(): invalid size (unsorted)");
-          if (__glibc_unlikely (chunksize_nomask (next) < 2 * SIZE_SZ)
+          if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
               || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
             malloc_printerr ("malloc(): invalid next size (unsorted)");
           if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
@@ -4429,7 +4449,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
        )
       {
        if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
-                             <= 2 * SIZE_SZ, 0)
+                             <= CHUNK_HDR_SZ, 0)
            || __builtin_expect (chunksize (chunk_at_offset (p, size))
                                 >= av->system_mem, 0))
          {
@@ -4440,7 +4460,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
            if (!have_lock)
              {
                __libc_lock_lock (av->mutex);
-                fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
+                fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
                        || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
                __libc_lock_unlock (av->mutex);
              }
@@ -4449,7 +4469,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
              malloc_printerr ("free(): invalid next size (fast)");
          }
 
-    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
+    free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
 
    atomic_store_relaxed (&av->have_fastchunks, true);
    unsigned int idx = fastbin_index(size);
@@ -4518,11 +4538,11 @@ _int_free (mstate av, mchunkptr p, int have_lock)
      malloc_printerr ("double free or corruption (!prev)");
 
    nextsize = chunksize(nextchunk);
-    if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
+    if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
       || __builtin_expect (nextsize >= av->system_mem, 0))
      malloc_printerr ("free(): invalid next size (normal)");
 
-    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
+    free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
 
    /* consolidate backward */
    if (!prev_inuse(p)) {
@@ -4753,7 +4773,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   unsigned long    remainder_size;  /* its size */
 
   /* oldmem size */
-  if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
+  if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
       || __builtin_expect (oldsize >= av->system_mem, 0))
     malloc_printerr ("realloc(): invalid old size");
 
@@ -4764,7 +4784,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   next = chunk_at_offset (oldp, oldsize);
   INTERNAL_SIZE_T nextsize = chunksize (next);
 
-  if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
+  if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
       || __builtin_expect (nextsize >= av->system_mem, 0))
     malloc_printerr ("realloc(): invalid next size");
 
@@ -4821,7 +4841,8 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
            {
              void *oldmem = chunk2mem (oldp);
              newmem = TAG_NEW_USABLE (newmem);
-              memcpy (newmem, oldmem, __malloc_usable_size (oldmem));
+              memcpy (newmem, oldmem,
+                      CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ);
              (void) TAG_REGION (chunk2rawmem (oldp), oldsize);
              _int_free (av, oldp, 1);
              check_inuse_chunk (av, newp);
@@ -4987,7 +5008,8 @@ mtrim (mstate av, size_t pad)
                                                + sizeof (struct malloc_chunk)
                                                + psm1) & ~psm1);
 
-                assert ((char *) chunk2rawmem (p) + 4 * SIZE_SZ <= paligned_mem);
+                assert ((char *) chunk2rawmem (p) + 2 * CHUNK_HDR_SZ
+                        <= paligned_mem);
                assert ((char *) p + size > paligned_mem);
 
                /* This is the size we could potentially free.  */
@@ -5062,7 +5084,7 @@ musable (void *mem)
       if (DUMPED_MAIN_ARENA_CHUNK (p))
        result = chunksize (p) - SIZE_SZ;
       else
-        result = chunksize (p) - 2 * SIZE_SZ;
+        result = chunksize (p) - CHUNK_HDR_SZ;
     }
   else if (inuse (p))
     result = chunksize (p) - SIZE_SZ;
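As a side note for reviewers, the MALLOC_CHECK_ trailer that
malloc_check_get_size walks and mem2mem_check writes can be exercised in
isolation.  The sketch below is a rough standalone model of that encoding
(plain C, no tagging; the helper names are invented here): block-length
bytes are laid down from the top of the usable area until the per-chunk
magic byte marks the requested size.

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Mirror mem2mem_check: from the top of the usable area, store the
       length of each block, never storing a byte equal to the magic.  */
    static void
    write_trailer (unsigned char *buf, size_t avail, size_t req,
                   unsigned char magic)
    {
      for (size_t i = avail - 1; i > req; )
        {
          size_t block = MIN (i - req, 0xff);
          if (block == magic)
            --block;
          buf[i] = (unsigned char) block;
          i -= block;
        }
      buf[req] = magic;
    }

    /* Mirror malloc_check_get_size: walk down, subtracting block lengths,
       until the magic byte marks the requested size.  */
    static size_t
    recover_request (const unsigned char *buf, size_t avail,
                     unsigned char magic)
    {
      size_t size = avail - 1;
      unsigned char c;
      while ((c = buf[size]) != magic)
        {
          assert (c > 0 && size >= (size_t) c);  /* else: memory corruption */
          size -= c;
        }
      return size;
    }

    int
    main (void)
    {
      unsigned char buf[1024];
      memset (buf, 0xaa, sizeof buf);
      unsigned char magic = 0x5c;   /* stands in for magicbyte (p) */
      write_trailer (buf, sizeof buf, 300, magic);
      printf ("recovered request: %zu\n",
              recover_request (buf, sizeof buf, magic));
      return 0;
    }

With tagging enabled, the only difference is that each buf[i] access above
goes through SAFE_CHAR_OFFSET so the byte is reached with the tag currently
stored at that address.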