From patchwork Fri Nov 30 01:08:23 2018
X-Patchwork-Submitter: He Hongjun
X-Patchwork-Id: 30409
Date: Fri, 30 Nov 2018 09:08:23 +0800 (CST)
Message-ID: <201811300908239918728@zte.com.cn>
In-Reply-To: <alpine.DEB.2.21.1811291721100.9148@digraph.polyomino.org.uk>
References: <201811281706560749906@zte.com.cn>
 <201811291709563275705@zte.com.cn>
 <alpine.DEB.2.21.1811291721100.9148@digraph.polyomino.org.uk>
From: He Hongjun <He.Hongjun@zte.com.cn>
To: Joseph Myers <joseph@codesourcery.com>
Cc: libc-alpha@sourceware.org
Subject: Reply: Re: Bug 15321 - malloc/free can't give the memory back to
 kernel when main_arena is discontinuous

Yes, I am covered by the one for "Chengdu Zhongxing Software Company
Limited (ZTE)" dated 23 August 2018.  I have added the copyright notice
to tst-discontinuous-main-arena.c.

The new patch:

2018-11-28  He Hongjun  <He.Hongjun@zte.com.cn>

	[BZ #15321]
	* malloc/Makefile (tests): Add tst-discontinuous-main-arena.
	* malloc/arena.c (heap_trim): Also trim heaps that belong to a
	discontinuous main arena.
	* malloc/malloc.c (sysmalloc): When the main arena is
	discontinuous, obtain more memory by linking a new mmap'ed heap.
	(_int_free): Call systrim directly only for a contiguous main
	arena; otherwise free memory on the linked heaps via heap_trim.
	* malloc/tst-discontinuous-main-arena.c: New test.

------------------ Original Mail ------------------
From: Joseph Myers
To: He Hongjun 10036572
Cc: libc-alpha@sourceware.org
Date: 2018-11-30 01:24
Subject: Re: Bug 15321 - malloc/free can't give the memory back to kernel
 when main_arena is discontinuous

On Thu, 29 Nov 2018, He.Hongjun@zte.com.cn wrote:

> Hello, I have fixed the Bug 15321.
> patch on glibc-2.28

Do you have an FSF copyright assignment?  (Are you covered by the one for
"Chengdu Zhongxing Software Company Limited (ZTE)" dated 23 August 2018?)

> diff --git a/malloc/tst-discontinuous-main-arena.c b/malloc/tst-discontinuous-main-arena.c
> old mode 100644
> new mode 100755
> index e69de29..3f6f0e7
> --- a/malloc/tst-discontinuous-main-arena.c
> +++ b/malloc/tst-discontinuous-main-arena.c
> @@ -0,0 +1,128 @@
> +/* Ensure that main arena of discontinuity can normal free memory.
> +
> +   The GNU C Library is free software; you can redistribute it and/or

Missing copyright notice, before the license notice.

> +#define TEST_FUNCTION do_test ()
> +#include "../test-skeleton.c"

New tests should use <support/test-driver.c>, not the old-style
test-skeleton.c.

-- 
Joseph S. Myers
joseph@codesourcery.com
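For anyone who wants to reproduce the underlying problem outside the test
suite before reading the patch: the following is a rough standalone sketch
(not part of the patch; the 16-page fence offset is arbitrary, and MAP_FIXED
would clobber anything already mapped there) of how the main arena becomes
discontinuous in the first place.  A page pinned above the program break
keeps brk from growing contiguously, which is the situation the patch below
and the new test construct.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
  long page = sysconf (_SC_PAGESIZE);

  /* Current program break; the heap grows upward from here.  */
  uintptr_t brk_end = (uintptr_t) sbrk (0);

  /* Pick a page-aligned address a little above the break and pin a
     mapping there, so a later brk extension cannot be contiguous.  */
  uintptr_t fence = (brk_end + 16 * page) & ~((uintptr_t) page - 1);
  void *p = mmap ((void *) fence, page, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (p == MAP_FAILED)
    {
      perror ("mmap");
      return 1;
    }
  printf ("fence page at %p, brk at %p\n", p, (void *) brk_end);
  return 0;
}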
diff --git a/malloc/Makefile b/malloc/Makefile
index 7d54bad..9d60ab1 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -38,6 +38,7 @@ tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
 	 tst-malloc_info \
 	 tst-malloc-too-large \
 	 tst-malloc-stats-cancellation \
+	 tst-discontinuous-main-arena \
 
 tests-static := \
 	 tst-interpose-static-nothread \
diff --git a/malloc/arena.c b/malloc/arena.c
index 497ae47..343ef13 100755
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -634,6 +634,45 @@ heap_trim (heap_info *heap, size_t pad)
       /*check_chunk(ar_ptr, top_chunk);*/
     }
 
+  if (ar_ptr == &main_arena && heap->prev == NULL)
+    {
+      char *old_top;
+      old_top = (char *) (heap + 1);
+      p = (mchunkptr) (old_top + sizeof (unsigned long));
+
+      misalign = (unsigned long) chunk2mem (p) & MALLOC_ALIGN_MASK;
+      if (misalign > 0)
+        p = (mchunkptr) ((char *) p + MALLOC_ALIGNMENT - misalign);
+
+      if (top_chunk == (mchunkptr) p)
+        {
+          p = (mchunkptr) (*(unsigned long *) old_top);
+          p = prev_chunk (p);
+          new_size = chunksize (p) + MINSIZE;
+
+          if (!prev_inuse (p))
+            new_size += prev_size (p);
+
+          if (new_size >= pad + MINSIZE + pagesz)
+            {
+              ar_ptr->system_mem -= heap->size;
+              delete_heap (heap);
+              if (!prev_inuse (p)) { /* consolidate backward */
+                p = prev_chunk (p);
+                unlink (ar_ptr, p, bck, fwd);
+              }
+
+              top (ar_ptr) = top_chunk = (mchunkptr) ((char *) p - 2 * SIZE_SZ);
+              set_head (top_chunk, new_size | PREV_INUSE);
+
+              set_contiguous (ar_ptr);
+              if ((unsigned long) chunksize (ar_ptr->top)
+                  >= (unsigned long) mp_.trim_threshold)
+                systrim (mp_.trim_threshold, ar_ptr);
+              return 0;
+            }
+        }
+    }
+
   /* Uses similar logic for per-thread arenas as the main arena with systrim
      and _int_free by preserving the top pad and rounding down to the nearest
      page.  */
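A note on exercising the heap_trim change above by hand: the threshold it
checks (mp_.trim_threshold) is the same one that the public M_TRIM_THRESHOLD
and malloc_trim interfaces control.  A minimal sketch, with arbitrary sizes
and counts, that drives that path from user code:

#include <malloc.h>
#include <stdlib.h>

int
main (void)
{
  /* User-visible counterpart of mp_.trim_threshold above.  */
  mallopt (M_TRIM_THRESHOLD, 128 * 1024);

  enum { N = 4096 };
  static void *blocks[N];
  for (int i = 0; i < N; i++)
    blocks[i] = malloc (1024);
  for (int i = 0; i < N; i++)
    free (blocks[i]);

  /* Explicitly ask malloc to return free memory at the top of the
     main arena to the system.  */
  malloc_trim (0);
  malloc_stats ();   /* arena statistics are printed to stderr */
  return 0;
}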
diff --git a/malloc/malloc.c b/malloc/malloc.c
index e247c77..ad53ef7 100755
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2388,7 +2388,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 
   assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
 
-  if (av != &main_arena)
+  if ((av != &main_arena) || (av == &main_arena && !contiguous (av)))
     {
       heap_info *old_heap, *heap;
       size_t old_heap_size;
@@ -2438,8 +2438,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
           goto try_mmap;
         }
     }
   else     /* av == main_arena */
-
-
     { /* Request enough space for nb + pad + overhead */
       size = nb + mp_.top_pad + MINSIZE;
@@ -2491,35 +2489,62 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
          and threshold limits, since the space will not be used as a
          segregated mmap region.
        */
-
-      /* Cannot merge with old top, so add its size back in */
-      if (contiguous (av))
-        size = ALIGN_UP (size + old_size, pagesize);
-
-      /* If we are relying on mmap as backup, then use larger units */
-      if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
-        size = MMAP_AS_MORECORE_SIZE;
-
-      /* Don't try if size wraps around 0 */
-      if ((unsigned long) (size) > (unsigned long) (nb))
-        {
-          char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
-
-          if (mbrk != MAP_FAILED)
+      heap_info *heap;
+      long misalign;
+      char *ptr;
+      heap = new_heap (nb + (MINSIZE + sizeof (*heap) + MALLOC_ALIGNMENT),
+                       mp_.top_pad);
+      if (!heap)
+        {
+          if (!tried_mmap)
+            goto try_mmap;
+          else
            {
-              /* We do not need, and cannot use, another sbrk call to find end */
-              brk = mbrk;
-              snd_brk = brk + size;
-
+              __set_errno (ENOMEM);
+              return 0;
+            }
+        }
+
+      heap->ar_ptr = av;
+      heap->prev = NULL;
+      av->system_mem += heap->size;
+
+      ptr = (char *) (heap + 1);
+      ptr += sizeof (unsigned long);
+      misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
+      if (misalign > 0)
+        ptr += MALLOC_ALIGNMENT - misalign;
+      /* Set up the new top.  */
+      top (av) = (mchunkptr) ptr;
+      set_head (top (av), (((char *) heap + heap->size) - ptr) | PREV_INUSE);
-              /*
-                 Record that we no longer have a contiguous sbrk region.
-                 After the first time mmap is used as backup, we do not
-                 ever rely on contiguous space since this could incorrectly
-                 bridge regions.
-               */
-              set_noncontiguous (av);
-            }
+
+      /* Setup fencepost and free the old top chunk with a multiple of
+         MALLOC_ALIGNMENT in size.  */
+      /* The fencepost takes at least MINSIZE bytes, because it might
+         become the top chunk again later.  Note that a footer is set
+         up, too, although the chunk is marked in use.  */
+      old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
+      set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
+                0 | PREV_INUSE);
+      *(unsigned long *) (heap + 1)
+        = (unsigned long) chunk_at_offset (old_top, old_size + 2 * SIZE_SZ);
+      if (old_size >= MINSIZE)
+        {
+          set_head (chunk_at_offset (old_top, old_size),
+                    (2 * SIZE_SZ) | PREV_INUSE);
+          set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
+          set_head (old_top, old_size | PREV_INUSE);
+          _int_free (av, old_top, 1);
+        }
+      else
+        {
+          set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
+          set_foot (old_top, (old_size + 2 * SIZE_SZ));
         }
+
+      /*
+         Record that we no longer have a contiguous sbrk region.
+         After the first time mmap is used as backup, we do not
+         ever rely on contiguous space since this could incorrectly
+         bridge regions.
+       */
+      set_noncontiguous (av);
     }
 
   if (brk != (char *) (MORECORE_FAILURE))
@@ -4347,7 +4372,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
     if (atomic_load_relaxed (&av->have_fastchunks))
       malloc_consolidate(av);
 
-    if (av == &main_arena) {
+    if ((av == &main_arena) && contiguous (av)) {
 #ifndef MORECORE_CANNOT_TRIM
       if ((unsigned long)(chunksize(av->top)) >=
           (unsigned long)(mp_.trim_threshold))
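A quick way to observe the symptom these malloc.c changes address, without
reading /proc, is the classic mallinfo interface: after everything has been
freed, fordblks (free bytes still held in the arena) and keepcost (releasable
top-chunk size) show how much memory malloc kept instead of returning it to
the kernel.  A small sketch, not part of the patch, with arbitrary block
counts and sizes:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

static void
report (const char *when)
{
  struct mallinfo mi = mallinfo ();
  printf ("%-10s arena=%d uordblks=%d fordblks=%d keepcost=%d\n",
          when, mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
}

int
main (void)
{
  enum { N = 1024, SZ = 1024 };
  static void *blocks[N];

  report ("start");
  for (int i = 0; i < N; i++)
    blocks[i] = malloc (SZ);
  report ("allocated");
  for (int i = 0; i < N; i++)
    free (blocks[i]);
  report ("freed");
  return 0;
}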
diff --git a/malloc/tst-discontinuous-main-arena.c b/malloc/tst-discontinuous-main-arena.c
old mode 100644
new mode 100755
index e69de29..ef7249f
--- a/malloc/tst-discontinuous-main-arena.c
+++ b/malloc/tst-discontinuous-main-arena.c
@@ -0,0 +1,133 @@
+/* Copyright (C) 1999-2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by He.Hongjun@zte.com.cn, 2018.
+
+   Ensure that a discontinuous main arena can free memory back to the
+   kernel.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#define MEM_SIZE (1024 * 128)
+#define BLK_SIZE 1024
+#define TOP_PAD_SIZE (1024 * 128)
+
+static int
+do_test (void)
+{
+  /* Read the current memory usage from /proc/self/statm.  */
+  FILE *f;
+  unsigned n_rss = 0;
+  unsigned n_resident = 0;
+  unsigned n_share = 0;
+  unsigned n_text = 0;
+  unsigned n_lib = 0;
+  unsigned n_data = 0;
+  unsigned n_dt = 0;
+  int i = 0;
+
+  f = fopen ("/proc/self/statm", "r");
+  if (f)
+    {
+      fscanf (f, "%u %u %u %u %u %u %u",
+              &n_rss, &n_resident, &n_share, &n_text, &n_lib, &n_data, &n_dt);
+      printf ("n_rss=%u\n", n_rss);
+      printf ("n_resident=%u\n", n_resident);
+      fclose (f);
+    }
+  else
+    return -1;
+
+  void **mem1 = (void **) malloc (sizeof (void *) * MEM_SIZE);
+  void **mem2 = (void **) malloc (sizeof (void *) * MEM_SIZE);
+
+  for (i = 0; i < MEM_SIZE; i++)
+    {
+      mem1[i] = malloc (BLK_SIZE);
+      if (mem1[i] == NULL)
+        {
+          printf ("malloc(BLK_SIZE) failed.\n");
+          return -1;
+        }
+      memset (mem1[i], 0xa, BLK_SIZE);
+    }
+
+  /* Place a fence beyond the program break to make the main arena
+     discontinuous.  Make sure we do not cover any region that has
+     already been acquired by sbrk.  */
+  long page_sz = sysconf (_SC_PAGESIZE);
+  void *fence_addr = mem1[MEM_SIZE - 1] + TOP_PAD_SIZE + BLK_SIZE + page_sz;
+  fence_addr = (void *) ((long) fence_addr & ~(page_sz - 1));
+
+  void *tt = mmap (fence_addr, page_sz, PROT_READ | PROT_WRITE,
+                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+  if (tt == MAP_FAILED || tt != fence_addr)
+    {
+      printf ("mmap(fence_addr, page_sz, PROT_READ | PROT_WRITE, \
+MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) failed.\n");
+      return -1;
+    }
+
+  for (i = 0; i < MEM_SIZE; i++)
+    {
+      mem2[i] = malloc (BLK_SIZE);
+      if (mem2[i] == NULL)
+        {
+          printf ("malloc(BLK_SIZE) failed.\n");
+          return -1;
+        }
+      memset (mem2[i], 0xb, BLK_SIZE);
+    }
+
+  for (i = 0; i < MEM_SIZE; i++)
+    {
+      free (mem1[i]);
+      mem1[i] = NULL;
+    }
+  free (mem1);
+
+  for (i = 0; i < MEM_SIZE; i++)
+    {
+      free (mem2[i]);
+      mem2[i] = NULL;
+    }
+  free (mem2);
+
+  f = fopen ("/proc/self/statm", "r");
+  if (f)
+    {
+      fscanf (f, "%u %u %u %u %u %u %u",
+              &n_rss, &n_resident, &n_share, &n_text, &n_lib, &n_data, &n_dt);
+      printf ("### n_rss=%u\n", n_rss);
+      printf ("### n_resident=%u\n", n_resident);
+      fclose (f);
+    }
+  else
+    return -1;
+
+  /* After everything has been freed, the resident size should have
+     dropped back; fail if too much memory is still held.  */
+  if (n_resident > 1000)
+    return -1;
+
+  return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
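For reference, following Joseph's comment above, the test-skeleton.c ending
would be replaced by <support/test-driver.c> in the current test style.  A
minimal sketch of that shape (the real checks from the test above would go
inside do_test; this only builds inside the glibc test framework):

#include <stdlib.h>
#include <support/check.h>

static int
do_test (void)
{
  /* Placeholder for the real test body.  */
  void *p = malloc (1024);
  TEST_VERIFY (p != NULL);
  free (p);
  return 0;
}

#include <support/test-driver.c>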