@@ -274,17 +274,36 @@ next_env_entry (char ***position)
#endif
-#ifdef SHARED
+#if defined(SHARED) || defined(_LIBC_MTAG)
static void *
__failing_morecore (ptrdiff_t d)
{
return (void *) MORECORE_FAILURE;
}
+#endif
+#ifdef SHARED
extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif
+#ifdef _LIBC_MTAG
+static void *
+__mtag_tag_new_usable (void *ptr)
+{
+ if (ptr)
+ ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr),
+ __malloc_usable_size (ptr));
+ return ptr;
+}
+
+static void *
+__mtag_tag_new_memset (void *ptr, int val, size_t size)
+{
+ return __libc_mtag_memset_with_tag (__libc_mtag_new_tag (ptr), val, size);
+}
+#endif
+
static void
ptmalloc_init (void)
{
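The two wrappers above are the heart of the scheme: pick a fresh random tag for the pointer, then color every granule of the usable region with it, so that pointers bearing the old tag fault on their next access. For readers without MTE hardware, here is a purely illustrative software model; a shadow array and top-byte "tags" on a 64-bit address stand in for the real __libc_mtag_* primitives, which work quite differently:

```c
/* Illustrative software model of tag-on-allocate; NOT the real
   __libc_mtag_* primitives.  Assumes 64-bit pointers, a hypothetical
   16-byte granule, and a shadow array in place of hardware tags.  */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define GRANULE 16
#define POOL_GRANULES 64

static uint8_t shadow[POOL_GRANULES];  /* one "allocation tag" per granule */
static uint8_t next_tag = 1;

static uintptr_t addr_bits (uintptr_t p) { return p & 0x00ffffffffffffffULL; }

/* Model of __libc_mtag_new_tag: new tag in the pointer only.  */
static uintptr_t model_new_tag (uintptr_t p)
{
  return addr_bits (p) | ((uintptr_t) next_tag++ << 56);
}

/* Model of __libc_mtag_tag_region: copy the pointer's tag to memory.  */
static uintptr_t model_tag_region (uintptr_t p, size_t size, uintptr_t base)
{
  for (size_t off = 0; off < size; off += GRANULE)
    shadow[(addr_bits (p) - base + off) / GRANULE] = (uint8_t) (p >> 56);
  return p;
}

/* Model of a tag-checked load: pointer tag must match memory tag.  */
static int model_load_ok (uintptr_t p, uintptr_t base)
{
  return shadow[(addr_bits (p) - base) / GRANULE] == (uint8_t) (p >> 56);
}

int main (void)
{
  uintptr_t base = 0x1000;                    /* pretend heap start */
  uintptr_t old = model_tag_region (model_new_tag (base), 32, base);
  uintptr_t fresh = model_tag_region (model_new_tag (old), 32, base);
  assert (model_load_ok (fresh, base));       /* current pointer is valid */
  assert (!model_load_ok (old, base));        /* stale tag would now fault */
  return 0;
}
```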
@@ -293,6 +312,24 @@ ptmalloc_init (void)
__malloc_initialized = 0;
+#ifdef _LIBC_MTAG
+ if ((TUNABLE_GET_FULL (glibc, memtag, enable, int32_t, NULL) & 1) != 0)
+ {
+ /* If the environment says that we should be using tagged memory
+ but morecore cannot supply tagged regions, then disable
+ morecore by making it always fail. */
+ if (__MTAG_SBRK_UNTAGGED)
+ __morecore = __failing_morecore;
+
+ __mtag_mmap_flags = __MTAG_MMAP_FLAGS;
+ __tag_new_memset = __mtag_tag_new_memset;
+ __tag_region = __libc_mtag_tag_region;
+ __tag_new_usable = __mtag_tag_new_usable;
+ __tag_at = __libc_mtag_address_get_tag;
+ __mtag_granule_mask = ~(size_t)(__MTAG_GRANULE_SIZE - 1);
+ }
+#endif
+
#ifdef SHARED
/* In case this libc copy is in a non-default namespace, never use brk.
Likewise if dlopened from statically linked program. */
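Note what the __failing_morecore swap achieves: sysmalloc first asks MORECORE for memory and only falls back to mmap on failure, so pinning __morecore to an always-failing stub cleanly retires the untaggable sbrk path. A minimal sketch of that fallback shape follows; the names are stand-ins, not the actual sysmalloc code:

```c
/* Hedged sketch of the morecore-then-mmap fallback; illustrative only.  */
#include <stddef.h>
#include <sys/mman.h>

#define MORECORE_FAILURE ((void *) -1)   /* as in malloc/malloc.c */

static void *failing_morecore (ptrdiff_t d)
{
  return MORECORE_FAILURE;
}

static void *(*morecore_hook) (ptrdiff_t) = failing_morecore;

static void *get_core (size_t size, int mtag_mmap_flags)
{
  void *p = morecore_hook ((ptrdiff_t) size);
  if (p != MORECORE_FAILURE)
    return p;
  /* mtag_mmap_flags stands in for MTAG_MMAP_FLAGS; as in the patch,
     it is OR'd into the protection bits.  */
  p = mmap (NULL, size, mtag_mmap_flags | PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? NULL : p;
}
```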
@@ -512,7 +549,7 @@ new_heap (size_t size, size_t top_pad)
}
}
}
- if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
+ if (__mprotect (p2, size, MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE) != 0)
{
__munmap (p2, HEAP_MAX_SIZE);
return 0;
@@ -542,7 +579,7 @@ grow_heap (heap_info *h, long diff)
{
if (__mprotect ((char *) h + h->mprotect_size,
(unsigned long) new_size - h->mprotect_size,
- PROT_READ | PROT_WRITE) != 0)
+ MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE) != 0)
return -2;
h->mprotect_size = new_size;
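The two mprotect hunks matter because arena heaps are reserved with PROT_NONE and only committed later with mprotect; since mprotect installs a complete new protection set rather than adding bits, the tag flag has to be passed again at commit and grow time. A minimal sketch of the reserve-then-commit shape, with stand-in names and abbreviated error handling:

```c
/* Sketch of the reserve-then-commit pattern used for arena heaps;
   heap_max and mtag_flags stand in for HEAP_MAX_SIZE and
   MTAG_MMAP_FLAGS.  */
#include <stddef.h>
#include <sys/mman.h>

static void *reserve_and_commit (size_t heap_max, size_t initial,
                                 int mtag_flags)
{
  /* Reserve address space only: no access rights, no tags.  */
  char *base = mmap (NULL, heap_max, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED)
    return NULL;
  /* Commit the first INITIAL bytes.  mprotect supplies a complete new
     protection set, so the tag flag must be repeated here or the
     committed pages will not be taggable.  */
  if (mprotect (base, initial, mtag_flags | PROT_READ | PROT_WRITE) != 0)
    {
      munmap (base, heap_max);
      return NULL;
    }
  return base;
}
```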
@@ -56,6 +56,12 @@ static int using_malloc_checking;
void
__malloc_check_init (void)
{
+#if HAVE_TUNABLES && defined (_LIBC_MTAG)
+ /* If memory tagging is enabled, the boundary-checking cookie is
+ redundant, so the checking hooks need not be installed. */
+ if ((TUNABLE_GET_FULL (glibc, memtag, enable, int32_t, NULL) & 1) != 0)
+ return;
+#endif
using_malloc_checking = 1;
__malloc_hook = malloc_check;
__free_hook = free_check;
@@ -242,6 +242,9 @@
/* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
#include <libc-diag.h>
+/* For memory tagging. */
+#include <libc-mtag.h>
+
#include <malloc/malloc-internal.h>
/* For SINGLE_THREAD_P. */
@@ -378,6 +381,52 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
void * __default_morecore (ptrdiff_t);
void *(*__morecore)(ptrdiff_t) = __default_morecore;
+#ifdef _LIBC_MTAG
+static void *
+__default_tag_region (void *ptr, size_t size)
+{
+ return ptr;
+}
+
+static void *
+__default_tag_nop (void *ptr)
+{
+ return ptr;
+}
+
+static int __mtag_mmap_flags = 0;
+static size_t __mtag_granule_mask = ~(size_t)0;
+
+static void *(*__tag_new_memset)(void *, int, size_t) = memset;
+static void *(*__tag_region)(void *, size_t) = __default_tag_region;
+static void *(*__tag_new_usable)(void *) = __default_tag_nop;
+static void *(*__tag_at)(void *) = __default_tag_nop;
+
+# define TAG_NEW_MEMSET(ptr, val, size) __tag_new_memset (ptr, val, size)
+# define TAG_REGION(ptr, size) __tag_region (ptr, size)
+# define TAG_NEW_USABLE(ptr) __tag_new_usable (ptr)
+# define TAG_AT(ptr) __tag_at (ptr)
+#else
+# define TAG_NEW_MEMSET(ptr, val, size) memset (ptr, val, size)
+# define TAG_REGION(ptr, size) (ptr)
+# define TAG_NEW_USABLE(ptr) (ptr)
+# define TAG_AT(ptr) (ptr)
+#endif
+
+/* When using tagged memory, the end of the user block cannot share a
+ granule with the header of the next chunk, so ensure that blocks
+ are rounded up to a multiple of the granule size. Take care not to
+ wrap around from close to SIZE_MAX to a small number. */
+static inline size_t
+ROUND_UP_ALLOCATION_SIZE (size_t bytes)
+{
+#ifdef _LIBC_MTAG
+ return (bytes + ~__mtag_granule_mask) & __mtag_granule_mask;
+#else
+ return bytes;
+#endif
+}
+
#include <string.h>
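The mask arithmetic is easy to verify by hand; with a hypothetical 16-byte granule, __mtag_granule_mask is ~15 and the expression rounds up exactly as intended:

```c
/* Worked check of ROUND_UP_ALLOCATION_SIZE's mask arithmetic for a
   hypothetical 16-byte granule (the real granule size comes from
   <libc-mtag.h>).  */
#include <assert.h>
#include <stddef.h>

int main (void)
{
  size_t granule_mask = ~(size_t) (16 - 1);        /* ...111110000 */
#define ROUND_UP(b) (((b) + ~granule_mask) & granule_mask)
  assert (ROUND_UP ((size_t) 1)  == 16);
  assert (ROUND_UP ((size_t) 16) == 16);   /* already aligned: unchanged */
  assert (ROUND_UP ((size_t) 17) == 32);
  assert (ROUND_UP ((size_t) 0)  == 0);
  return 0;
}
```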
@@ -1184,8 +1233,9 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/* conversion from malloc headers to user pointers, and back */
-#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
-#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
+#define chunk2mem(p) ((void*)TAG_AT (((char*)(p) + 2*SIZE_SZ)))
+#define chunk2rawmem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
+#define mem2chunk(mem) ((mchunkptr)TAG_AT (((char*)(mem) - 2*SIZE_SZ)))
/* The smallest possible chunk */
#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
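The chunk2mem/chunk2rawmem split exists because TAG_AT is not free: it fetches the allocation tag out of memory, which is wasteful, and on a not-yet-tagged mapping not even meaningful, for purely internal address computations such as the aligned_OK asserts changed below. A reduced sketch of the pair, with a no-op stand-in for TAG_AT:

```c
/* Sketch of the two conversions; tag_at stands in for
   __libc_mtag_address_get_tag (a no-op on non-MTE systems).  */
#include <stddef.h>

#define SIZE_SZ sizeof (size_t)

static void *tag_at (void *p) { return p; }

/* Untagged conversion: pure pointer arithmetic for malloc's own
   bookkeeping (alignment asserts, header access).  */
#define chunk2rawmem(p) ((void *) ((char *) (p) + 2 * SIZE_SZ))

/* Tagged conversion: same arithmetic, then pick up the current tag so
   the result can safely be handed to (or received from) the user.  */
#define chunk2mem(p) ((void *) tag_at ((char *) (p) + 2 * SIZE_SZ))
```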
@@ -1219,6 +1269,7 @@ checked_request2size (size_t req, size_t *sz) __nonnull (1)
{
if (__glibc_unlikely (req > PTRDIFF_MAX))
return false;
+ req = ROUND_UP_ALLOCATION_SIZE (req);
*sz = request2size (req);
return true;
}
@@ -1964,7 +2015,7 @@ do_check_chunk (mstate av, mchunkptr p)
/* chunk is page-aligned */
assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
/* mem is aligned */
- assert (aligned_OK (chunk2mem (p)));
+ assert (aligned_OK (chunk2rawmem (p)));
}
}
@@ -1988,7 +2039,7 @@ do_check_free_chunk (mstate av, mchunkptr p)
if ((unsigned long) (sz) >= MINSIZE)
{
assert ((sz & MALLOC_ALIGN_MASK) == 0);
- assert (aligned_OK (chunk2mem (p)));
+ assert (aligned_OK (chunk2rawmem (p)));
/* ... matching footer field */
assert (prev_size (next_chunk (p)) == sz);
/* ... and is fully consolidated */
@@ -2067,7 +2118,7 @@ do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
assert ((sz & MALLOC_ALIGN_MASK) == 0);
assert ((unsigned long) (sz) >= MINSIZE);
/* ... and alignment */
- assert (aligned_OK (chunk2mem (p)));
+ assert (aligned_OK (chunk2rawmem (p)));
/* chunk is less than MINSIZE more than request */
assert ((long) (sz) - (long) (s) >= 0);
assert ((long) (sz) - (long) (s + MINSIZE) < 0);
@@ -2322,7 +2373,8 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
/* Don't try if size wraps around 0 */
if ((unsigned long) (size) > (unsigned long) (nb))
{
- mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
+ mm = (char *) (MMAP (0, size,
+ MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE, 0));
if (mm != MAP_FAILED)
{
@@ -2336,14 +2388,14 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
{
- /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
+ /* For glibc, chunk2rawmem increases the address by 2*SIZE_SZ and
MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
- assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
+ assert (((INTERNAL_SIZE_T) chunk2rawmem (mm) & MALLOC_ALIGN_MASK) == 0);
front_misalign = 0;
}
else
- front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
+ front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (mm) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
{
correction = MALLOC_ALIGNMENT - front_misalign;
@@ -2515,7 +2567,9 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
/* Don't try if size wraps around 0 */
if ((unsigned long) (size) > (unsigned long) (nb))
{
- char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
+ char *mbrk = (char *) (MMAP (0, size,
+ MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE,
+ 0));
if (mbrk != MAP_FAILED)
{
@@ -2586,7 +2640,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
/* Guarantee alignment of first new chunk made from this space */
- front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
+ front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
{
/*
@@ -2644,10 +2698,10 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
/* MORECORE/mmap must correctly align */
- assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
+ assert (((unsigned long) chunk2rawmem (brk) & MALLOC_ALIGN_MASK) == 0);
else
{
- front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
+ front_misalign = (INTERNAL_SIZE_T) chunk2rawmem (brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
{
/*
@@ -2832,7 +2886,7 @@ munmap_chunk (mchunkptr p)
if (DUMPED_MAIN_ARENA_CHUNK (p))
return;
- uintptr_t mem = (uintptr_t) chunk2mem (p);
+ uintptr_t mem = (uintptr_t) chunk2rawmem (p);
uintptr_t block = (uintptr_t) p - prev_size (p);
size_t total_size = prev_size (p) + size;
/* Unfortunately we have to do the compilers job by hand here. Normally
@@ -2887,7 +2941,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
p = (mchunkptr) (cp + offset);
- assert (aligned_OK (chunk2mem (p)));
+ assert (aligned_OK (chunk2rawmem (p)));
assert (prev_size (p) == offset);
set_head (p, (new_size - offset) | IS_MMAPPED);
@@ -3068,14 +3122,15 @@ __libc_malloc (size_t bytes)
&& tcache
&& tcache->counts[tc_idx] > 0)
{
- return tcache_get (tc_idx);
+ victim = tcache_get (tc_idx);
+ return TAG_NEW_USABLE (victim);
}
DIAG_POP_NEEDS_COMMENT;
#endif
if (SINGLE_THREAD_P)
{
- victim = _int_malloc (&main_arena, bytes);
+ victim = TAG_NEW_USABLE (_int_malloc (&main_arena, bytes));
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
&main_arena == arena_for_chunk (mem2chunk (victim)));
return victim;
@@ -3096,6 +3151,8 @@ __libc_malloc (size_t bytes)
if (ar_ptr != NULL)
__libc_lock_unlock (ar_ptr->mutex);
+ victim = TAG_NEW_USABLE (victim);
+
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
ar_ptr == arena_for_chunk (mem2chunk (victim)));
return victim;
@@ -3119,8 +3176,17 @@ __libc_free (void *mem)
if (mem == 0) /* free(0) has no effect */
return;
+#ifdef _LIBC_MTAG
+ /* Quickly check that the freed pointer's tag matches the memory's
+ tag. This provides useful double-free detection. */
+ *(volatile char *)mem;
+#endif
+
p = mem2chunk (mem);
+ /* Mark the chunk as belonging to the library again. */
+ (void) TAG_REGION (chunk2rawmem (p), __malloc_usable_size (mem));
+
if (chunk_is_mmapped (p)) /* release mmapped memory. */
{
/* See if the dynamic brk/mmap threshold needs adjusting.
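The bare volatile load is the cheapest possible tag check: since free retags the region back to the library's tag, a second free of the same pointer trips this probe. A hedged sketch of the idiom, illustrative only:

```c
/* Tag-probe idiom as used at the top of __libc_free.  On a non-MTE
   build this is a harmless one-byte load; on MTE hardware in
   synchronous checking mode the load faults when the pointer's tag
   no longer matches the memory's allocation tag, e.g. on a second
   free of the same pointer.  */
static void free_tag_probe (void *mem)
{
  if (mem == NULL)
    return;
  *(volatile char *) mem;   /* tag-checked load */
}
```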
@@ -3170,6 +3236,12 @@ __libc_realloc (void *oldmem, size_t bytes)
if (oldmem == 0)
return __libc_malloc (bytes);
+#ifdef _LIBC_MTAG
+ /* Perform a quick check to ensure that the pointer's tag matches the
+ memory's tag. */
+ *(volatile char *) oldmem;
+#endif
+
/* chunk corresponding to oldmem */
const mchunkptr oldp = mem2chunk (oldmem);
/* its size */
@@ -3225,7 +3297,15 @@ __libc_realloc (void *oldmem, size_t bytes)
#if HAVE_MREMAP
newp = mremap_chunk (oldp, nb);
if (newp)
- return chunk2mem (newp);
+ {
+ void *newmem = chunk2rawmem (newp);
+ /* Give the new block a different tag. This helps ensure
+ that stale handles to the previous mapping cannot be
+ reused. There is a performance cost, for both us and the
+ caller, in doing this, so we might want to
+ reconsider. */
+ return TAG_NEW_USABLE (newmem);
+ }
#endif
/* Note the extra SIZE_SZ overhead. */
if (oldsize - SIZE_SZ >= nb)
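The fresh tag after mremap serves the same purpose as retagging in free: every pointer handed out before the realloc carries the old tag and will now fault rather than silently alias the new block. A tiny sketch of that path's shape, with a stand-in for TAG_NEW_USABLE:

```c
/* Sketch of the retag-after-mremap step; illustrative only.  The
   callback is assumed to recolor the whole usable region with a
   fresh tag and return the retagged pointer.  */
static void *realloc_mremap_path (void *newraw,
                                  void *(*tag_new_usable) (void *))
{
  /* Stale pre-realloc pointers keep the old tag and can no longer
     access the block; only the returned pointer is valid.  */
  return tag_new_usable (newraw);
}
```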
@@ -3308,7 +3388,6 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
return 0;
}
-
/* Make sure alignment is power of 2. */
if (!powerof2 (alignment))
{
@@ -3323,8 +3402,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
p = _int_memalign (&main_arena, alignment, bytes);
assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
&main_arena == arena_for_chunk (mem2chunk (p)));
-
- return p;
+ return TAG_NEW_USABLE (p);
}
arena_get (ar_ptr, bytes + alignment + MINSIZE);
@@ -3342,7 +3420,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
ar_ptr == arena_for_chunk (mem2chunk (p)));
- return p;
+ return TAG_NEW_USABLE (p);
}
/* For ISO C11. */
weak_alias (__libc_memalign, aligned_alloc)
@@ -3351,17 +3429,22 @@ libc_hidden_def (__libc_memalign)
void *
__libc_valloc (size_t bytes)
{
+ void *p;
+
if (__malloc_initialized < 0)
ptmalloc_init ();
void *address = RETURN_ADDRESS (0);
size_t pagesize = GLRO (dl_pagesize);
- return _mid_memalign (pagesize, bytes, address);
+ p = _mid_memalign (pagesize, bytes, address);
+ return TAG_NEW_USABLE (p);
}
void *
__libc_pvalloc (size_t bytes)
{
+ void *p;
+
if (__malloc_initialized < 0)
ptmalloc_init ();
@@ -3378,19 +3461,22 @@ __libc_pvalloc (size_t bytes)
}
rounded_bytes = rounded_bytes & -(pagesize - 1);
- return _mid_memalign (pagesize, rounded_bytes, address);
+ p = _mid_memalign (pagesize, rounded_bytes, address);
+ return TAG_NEW_USABLE (p);
}
void *
__libc_calloc (size_t n, size_t elem_size)
{
mstate av;
- mchunkptr oldtop, p;
- INTERNAL_SIZE_T sz, csz, oldtopsize;
+ mchunkptr oldtop;
+ INTERNAL_SIZE_T sz, oldtopsize;
void *mem;
+#ifndef _LIBC_MTAG
unsigned long clearsize;
unsigned long nclears;
INTERNAL_SIZE_T *d;
+#endif
ptrdiff_t bytes;
if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
@@ -3398,6 +3484,7 @@ __libc_calloc (size_t n, size_t elem_size)
__set_errno (ENOMEM);
return NULL;
}
+
sz = bytes;
void *(*hook) (size_t, const void *) =
@@ -3467,7 +3554,14 @@ __libc_calloc (size_t n, size_t elem_size)
if (mem == 0)
return 0;
- p = mem2chunk (mem);
+ /* If we are using memory tagging, then the tags must be set
+ regardless of MORECORE_CLEARS, so zero the whole block in the
+ same pass as setting the tags. */
+#ifdef _LIBC_MTAG
+ return TAG_NEW_MEMSET (mem, 0, __malloc_usable_size (mem));
+#else
+ mchunkptr p = mem2chunk (mem);
+ INTERNAL_SIZE_T csz = chunksize (p);
/* Two optional cases in which clearing not necessary */
if (chunk_is_mmapped (p))
@@ -3478,8 +3572,6 @@ __libc_calloc (size_t n, size_t elem_size)
return mem;
}
- csz = chunksize (p);
-
#if MORECORE_CLEARS
if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
{
@@ -3522,6 +3614,7 @@ __libc_calloc (size_t n, size_t elem_size)
}
return mem;
+#endif
}
/*
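The calloc restructuring is worth pausing on: the untagged build keeps the long-standing optimization of clearing only bytes not already known to be zero (from MORECORE_CLEARS or a fresh mmap), while the tagged build must visit every granule to write tags anyway, so a combined zero-and-tag store is strictly better. A reduced sketch of the split; the callback stands in for __libc_mtag_memset_with_tag, which on AArch64 could plausibly use tagged-zeroing stores such as STZG:

```c
/* Hedged sketch of the calloc clearing split; not the actual glibc
   code.  mtag_memset_with_tag is assumed to zero SIZE bytes while
   also writing MEM's tag to every granule it covers.  */
#include <stddef.h>
#include <string.h>

static void *calloc_clear (void *mem, size_t usable, int tagging_enabled,
                           void *(*mtag_memset_with_tag) (void *, int,
                                                          size_t))
{
  if (tagging_enabled)
    /* Tags must be (re)written for the whole block, so zero it in
       the same pass regardless of where the memory came from.  */
    return mtag_memset_with_tag (mem, 0, usable);
  /* Untagged build: the usual partial-clearing optimizations apply;
     a plain memset stands in for them here.  */
  return memset (mem, 0, usable);
}
```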
@@ -4618,7 +4711,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
av->top = chunk_at_offset (oldp, nb);
set_head (av->top, (newsize - nb) | PREV_INUSE);
check_inuse_chunk (av, oldp);
- return chunk2mem (oldp);
+ return TAG_NEW_USABLE (chunk2rawmem (oldp));
}
/* Try to expand forward into next chunk; split off remainder below */
@@ -4651,7 +4744,10 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
}
else
{
- memcpy (newmem, chunk2mem (oldp), oldsize - SIZE_SZ);
+ void *oldmem = chunk2mem (oldp);
+ newmem = TAG_NEW_USABLE (newmem);
+ memcpy (newmem, oldmem, __malloc_usable_size (oldmem));
+ (void) TAG_REGION (chunk2rawmem (oldp), oldsize);
_int_free (av, oldp, 1);
check_inuse_chunk (av, newp);
return chunk2mem (newp);
@@ -4673,6 +4769,8 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
else /* split remainder */
{
remainder = chunk_at_offset (newp, nb);
+ /* Clear any user-space tags before writing the header. */
+ remainder = TAG_REGION (remainder, remainder_size);
set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
set_head (remainder, remainder_size | PREV_INUSE |
(av != &main_arena ? NON_MAIN_ARENA : 0));
@@ -4682,8 +4780,8 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
}
check_inuse_chunk (av, newp);
- return chunk2mem (newp);
+ return TAG_NEW_USABLE (chunk2rawmem (newp));
}
/*
------------------------------ memalign ------------------------------
@@ -4760,7 +4858,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
p = newp;
assert (newsize >= nb &&
- (((unsigned long) (chunk2mem (p))) % alignment) == 0);
+ (((unsigned long) (chunk2rawmem (p))) % alignment) == 0);
}
/* Also give back spare room at the end */
@@ -4814,7 +4912,7 @@ mtrim (mstate av, size_t pad)
+ sizeof (struct malloc_chunk)
+ psm1) & ~psm1);
- assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
+ assert ((char *) chunk2rawmem (p) + 4 * SIZE_SZ <= paligned_mem);
assert ((char *) p + size > paligned_mem);
/* This is the size we could potentially free. */
@@ -4877,20 +4975,30 @@ musable (void *mem)
mchunkptr p;
if (mem != 0)
{
+ size_t result = 0;
+
p = mem2chunk (mem);
if (__builtin_expect (using_malloc_checking == 1, 0))
return malloc_check_get_size (p);
if (chunk_is_mmapped (p))
{
if (DUMPED_MAIN_ARENA_CHUNK (p))
- return chunksize (p) - SIZE_SZ;
+ result = chunksize (p) - SIZE_SZ;
else
- return chunksize (p) - 2 * SIZE_SZ;
+ result = chunksize (p) - 2 * SIZE_SZ;
}
else if (inuse (p))
- return chunksize (p) - SIZE_SZ;
+ result = chunksize (p) - SIZE_SZ;
+
+#ifdef _LIBC_MTAG
+ /* The usable space may be reduced when memory tagging is in use,
+ since user data may not share a granule with malloc's internal
+ data structures. */
+ result &= __mtag_granule_mask;
+#endif
+ return result;
}
return 0;
}
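The final mask is the rounding-down dual of ROUND_UP_ALLOCATION_SIZE: the reported usable size is truncated to a granule multiple so callers never touch the reserved partial granule. With a hypothetical 16-byte granule:

```c
/* Worked check of the musable mask: the usable size is truncated to
   a multiple of the (hypothetical) 16-byte granule.  */
#include <assert.h>
#include <stddef.h>

int main (void)
{
  size_t granule_mask = ~(size_t) (16 - 1);
  assert (((size_t) 24 & granule_mask) == 16);   /* partial granule cut */
  assert (((size_t) 32 & granule_mask) == 32);   /* exact multiple kept */
  return 0;
}
```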
@@ -77,6 +77,13 @@ extern void *pvalloc (size_t __size) __THROW __attribute_malloc__ __wur;
contiguous pieces of memory. */
extern void *(*__morecore) (ptrdiff_t __size);
+#ifdef _LIBC_MTAG
+extern int __mtag_mmap_flags;
+#define MTAG_MMAP_FLAGS __mtag_mmap_flags
+#else
+#define MTAG_MMAP_FLAGS 0
+#endif
+
/* Default value of `__morecore'. */
extern void *__default_morecore (ptrdiff_t __size)
__THROW __attribute_malloc__;
new file mode 100644
@@ -0,0 +1,52 @@
+/* libc-internal interface for tagged (colored) memory support.
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GENERIC_LIBC_MTAG_H
+#define _GENERIC_LIBC_MTAG_H 1
+
+/* Generic bindings for systems that do not support memory tagging. */
+
+/* Used to ensure additional alignment when objects need to have distinct
+ tags. */
+#define __MTAG_GRANULE_SIZE 1
+
+/* Non-zero if memory obtained via morecore (sbrk) is not tagged. */
+#define __MTAG_SBRK_UNTAGGED 0
+
+/* Extra flags to pass to mmap() to request a tagged region of memory. */
+#define __MTAG_MMAP_FLAGS 0
+
+/* Set the tags for a region of memory, which must have size and alignment
+ that are multiples of __MTAG_GRANULE_SIZE. Size cannot be zero.
+ void *__libc_mtag_tag_region (const void *, size_t) */
+#define __libc_mtag_tag_region(p, s) (p)
+
+/* Optimized equivalent to __libc_mtag_tag_region followed by memset. */
+#define __libc_mtag_memset_with_tag memset
+
+/* Convert address P to a pointer that is tagged correctly for that
+ location.
+ void *__libc_mtag_address_get_tag (void*) */
+#define __libc_mtag_address_get_tag(p) (p)
+
+/* Assign a new (random) tag to a pointer P (does not adjust the tag on
+ the memory addressed).
+ void *__libc_mtag_new_tag (void*) */
+#define __libc_mtag_new_tag(p) (p)
+
+#endif /* _GENERIC_LIBC_MTAG_H */
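For contrast with the no-op generic bindings above, here is a hedged sketch of what an MTE-capable AArch64 port could plug into the same interface. Everything below is an assumption for illustration, including the inline-asm forms, the PROT_MTE flag, and the loop structure; it is not the actual sysdeps implementation:

```c
/* Illustrative AArch64/Linux MTE bindings; assumptions, not glibc's
   actual sysdeps code.  Requires -march=armv8.5-a+memtag and a kernel
   exposing PROT_MTE (Linux AArch64 UAPI headers).  */
#include <stddef.h>

#define __MTAG_GRANULE_SIZE 16
#define __MTAG_SBRK_UNTAGGED 1        /* sbrk memory carries no tags */
#define __MTAG_MMAP_FLAGS PROT_MTE    /* extra protection bit for mmap */

/* Insert a random allocation tag into pointer P (IRG).  */
static inline void *__libc_mtag_new_tag (void *p)
{
  void *q;
  __asm__ ("irg %0, %1" : "=r" (q) : "r" (p));
  return q;
}

/* Store P's tag to every 16-byte granule of [P, P+SIZE) (STG).  */
static inline void *__libc_mtag_tag_region (void *p, size_t size)
{
  for (size_t i = 0; i < size; i += __MTAG_GRANULE_SIZE)
    __asm__ volatile ("stg %0, [%0]" : : "r" ((char *) p + i) : "memory");
  return p;
}

/* Read the allocation tag at P's address back into the pointer (LDG).  */
static inline void *__libc_mtag_address_get_tag (void *p)
{
  __asm__ ("ldg %0, [%1]" : "+r" (p) : "r" (p));
  return p;
}
```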