[v5,5/6] aarch64: Add sysv specific enabling code for memory tagging
Commit Message
Add various defines and stubs for enabling MTE on AArch64 sysv-like
systems such as Linux. The HWCAP feature bit is copied over in the
same way as other feature bits. Similarly we add a new wrapper header
for mman.h to define the PROT_MTE flag that can be used with mmap and
related functions.
We add a new field to struct cpu_features that can be used, for
example, to check whether or not certain ifunc'd routines should be
bound to MTE-safe versions.
Finally, if we detect that MTE should be enabled (i.e. via the glibc
tunable), we enable MTE during startup as required.
Support in the Linux kernel was added in version 5.10.
Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
---
sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h | 1 +
sysdeps/unix/sysv/linux/aarch64/bits/mman.h | 1 +
.../unix/sysv/linux/aarch64/cpu-features.c | 30 +++++++++++++++++++
.../unix/sysv/linux/aarch64/cpu-features.h | 2 ++
4 files changed, 34 insertions(+)
@@ -73,3 +73,4 @@
#define HWCAP2_DGH (1 << 15)
#define HWCAP2_RNG (1 << 16)
#define HWCAP2_BTI (1 << 17)
+#define HWCAP2_MTE (1 << 18)
@@ -24,6 +24,7 @@
arch/arm64/include/uapi/asm/mman.h. */
#define PROT_BTI 0x10
+#define PROT_MTE 0x20
#include <bits/mman-map-flags-generic.h>
@@ -19,10 +19,17 @@
#include <cpu-features.h>
#include <sys/auxv.h>
#include <elf/dl-hwcaps.h>
+#include <sys/prctl.h>
#define DCZID_DZP_MASK (1 << 4)
#define DCZID_BS_MASK (0xf)
+/* The maximal set of permitted tags that the MTE random tag generation
+ instruction may use. We exclude tag 0 because a) we want to reserve
+ that for the libc heap structures and b) because it makes it easier
+   to see when pointers have been correctly tagged. */
+#define MTE_ALLOWED_TAGS (0xfffe << PR_MTE_TAG_SHIFT)
+
#if HAVE_TUNABLES
struct cpu_list
{
@@ -86,4 +93,27 @@ init_cpu_features (struct cpu_features *cpu_features)
/* Check if BTI is supported. */
cpu_features->bti = GLRO (dl_hwcap2) & HWCAP2_BTI;
+
+  /* Set up memory tagging support if the HW and kernel support it, and if
+ the user has requested it. */
+ cpu_features->mte_state = 0;
+
+#ifdef USE_MTAG
+# if HAVE_TUNABLES
+ int mte_state = TUNABLE_GET (glibc, mem, tagging, unsigned, 0);
+ cpu_features->mte_state = (GLRO (dl_hwcap2) & HWCAP2_MTE) ? mte_state : 0;
+ /* If we lack the MTE feature, disable the tunable, since it will
+ otherwise cause instructions that won't run on this CPU to be used. */
+ TUNABLE_SET (glibc, mem, tagging, unsigned, cpu_features->mte_state);
+# endif
+
+ if (cpu_features->mte_state & 2)
+ __prctl (PR_SET_TAGGED_ADDR_CTRL,
+ (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | MTE_ALLOWED_TAGS),
+ 0, 0, 0);
+ else if (cpu_features->mte_state)
+ __prctl (PR_SET_TAGGED_ADDR_CTRL,
+ (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC | MTE_ALLOWED_TAGS),
+ 0, 0, 0);
+#endif
}
@@ -70,6 +70,8 @@ struct cpu_features
uint64_t midr_el1;
unsigned zva_size;
bool bti;
+ /* Currently, the GLIBC memory tagging tunable only defines 8 bits. */
+ uint8_t mte_state;
};
#endif /* _CPU_FEATURES_AARCH64_H */