[v3,7/8] aarch64: Add sysv specific enabling code for memory tagging

Message ID: 20201123154236.25809-8-rearnsha@arm.com
State: Superseded
Series: Memory tagging support

Commit Message

Richard Earnshaw Nov. 23, 2020, 3:42 p.m. UTC
  Add various defines and stubs for enabling MTE on AArch64 sysv-like
systems such as Linux.  The HWCAP feature bit is copied over in the
same way as other feature bits.  Similarly we add a new wrapper header
for mman.h to define the PROT_MTE flag that can be used with mmap and
related functions.

We add a new field to struct cpu_features that can be used, for
example, to check whether or not certain ifunc'd routines should be
bound to MTE-safe versions.

Finally, if we detect that MTE should be enabled (i.e. via the glibc
tunable), we enable MTE during startup as required.
---
 sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h  |  1 +
 sysdeps/unix/sysv/linux/aarch64/bits/mman.h   |  7 +++++
 .../unix/sysv/linux/aarch64/cpu-features.c    | 28 +++++++++++++++++++
 .../unix/sysv/linux/aarch64/cpu-features.h    |  1 +
 4 files changed, 37 insertions(+)
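
For orientation, here is a minimal sketch (editorial, not part of the
patch) of how an application might use the new PROT_MTE flag, assuming
MTE-capable hardware and a kernel built with MTE support:

    #include <sys/mman.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* PROT_MTE comes from the AArch64 <bits/mman.h> patched below.  */
      void *p = mmap (NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED)
        {
          perror ("mmap");
          return 1;
        }
      /* Loads and stores through P are now checked against the
         mapping's allocation tags.  */
      munmap (p, 4096);
      return 0;
    }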
  

Comments

Szabolcs Nagy Nov. 23, 2020, 4:53 p.m. UTC | #1
The 11/23/2020 15:42, Richard Earnshaw wrote:
> --- a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
> @@ -1,4 +1,5 @@
>  /* Definitions for POSIX memory map interface.  Linux/AArch64 version.
> +
>     Copyright (C) 2020 Free Software Foundation, Inc.
>     This file is part of the GNU C Library.
>  
> @@ -25,6 +26,12 @@
>  
>  #define PROT_BTI	0x10
>  
> +/* The following definitions basically come from the kernel headers.
> +   But the kernel header is not namespace clean.  */
> +
> +/* Other flags.  */
> +#define PROT_MTE	0x20		/* Normal Tagged mapping.  */
> +

the comments are not needed and the PROT_MTE one in
particular adds unnecessary risk: copying code from
linux uapi headers is ok (it's the api), but copying
comments is not ok (they are copyrightable).

i think all we need is a reference to the appropriate
linux uapi header which is already there for PROT_BTI.
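
For reference, the suggested cleanup would leave the hunk looking
roughly like this (an editorial sketch of the existing comment plus the
new define; the actual v4 may differ):

    /* AArch64 specific definitions, should be in sync with
       arch/arm64/include/uapi/asm/mman.h.  */

    #define PROT_BTI	0x10
    #define PROT_MTE	0x20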
  
Richard Earnshaw (lists) Nov. 23, 2020, 5:33 p.m. UTC | #2
On 23/11/2020 16:53, Szabolcs Nagy wrote:
> The 11/23/2020 15:42, Richard Earnshaw wrote:
>> --- a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
>> +++ b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
>> @@ -1,4 +1,5 @@
>>  /* Definitions for POSIX memory map interface.  Linux/AArch64 version.
>> +
>>     Copyright (C) 2020 Free Software Foundation, Inc.
>>     This file is part of the GNU C Library.
>>  
>> @@ -25,6 +26,12 @@
>>  
>>  #define PROT_BTI     0x10
>>  
>> +/* The following definitions basically come from the kernel headers.
>> +   But the kernel header is not namespace clean.  */
>> +
>> +/* Other flags.  */
>> +#define PROT_MTE     0x20            /* Normal Tagged mapping.  */
>> +
> 
> the comments are not needed and the PROT_MTE one in
> particular adds unnecessary risk: copying code from
> linux uapi headers is ok (it's the api), but copying
> comments is not ok (they are copyrightable).
> 
> i think all we need is a reference to the appropriate
> linux uapi header which is already there for PROT_BTI.

OK, will fix.

R.
  
Siddhesh Poyarekar Nov. 25, 2020, 3:34 p.m. UTC | #3
On 11/23/20 9:12 PM, Richard Earnshaw via Libc-alpha wrote:
> 
> Add various defines and stubs for enabling MTE on AArch64 sysv-like
> systems such as Linux.  The HWCAP feature bit is copied over in the
> same way as other feature bits.  Similarly we add a new wrapper header
> for mman.h to define the PROT_MTE flag that can be used with mmap and
> related functions.
> 
> We add a new field to struct cpu_features that can be used, for
> example, to check whether or not certain ifunc'd routines should be
> bound to MTE-safe versions.
> 
> Finally, if we detect that MTE should be enabled (i.e. via the glibc
> tunable), we enable MTE during startup as required.
> ---
>   sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h  |  1 +
>   sysdeps/unix/sysv/linux/aarch64/bits/mman.h   |  7 +++++
>   .../unix/sysv/linux/aarch64/cpu-features.c    | 28 +++++++++++++++++++
>   .../unix/sysv/linux/aarch64/cpu-features.h    |  1 +
>   4 files changed, 37 insertions(+)
> 
> diff --git a/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h b/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
> index af90d8a626..389852f1d9 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
> @@ -73,3 +73,4 @@
>  #define HWCAP2_DGH		(1 << 15)
>  #define HWCAP2_RNG		(1 << 16)
>  #define HWCAP2_BTI		(1 << 17)
> +#define HWCAP2_MTE		(1 << 18)
> diff --git a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
> index ecae046344..3658b958b5 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
> @@ -1,4 +1,5 @@
>  /* Definitions for POSIX memory map interface.  Linux/AArch64 version.
> +

Unnecessary newline.  I spotted a couple in 3/8 as well.

>     Copyright (C) 2020 Free Software Foundation, Inc.
>     This file is part of the GNU C Library.
>  
> @@ -25,6 +26,12 @@
>  
>  #define PROT_BTI	0x10
>  
> +/* The following definitions basically come from the kernel headers.
> +   But the kernel header is not namespace clean.  */
> +
> +/* Other flags.  */
> +#define PROT_MTE	0x20		/* Normal Tagged mapping.  */
> +
>  #include <bits/mman-map-flags-generic.h>
>  
>  /* Include generic Linux declarations.  */
> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> index b9ab827aca..aa4a82c6e8 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
> @@ -19,6 +19,7 @@
>  #include <cpu-features.h>
>  #include <sys/auxv.h>
>  #include <elf/dl-hwcaps.h>
> +#include <sys/prctl.h>
>  
>  #define DCZID_DZP_MASK (1 << 4)
>  #define DCZID_BS_MASK (0xf)
> @@ -86,4 +87,31 @@ init_cpu_features (struct cpu_features *cpu_features)
>  
>    /* Check if BTI is supported.  */
>    cpu_features->bti = GLRO (dl_hwcap2) & HWCAP2_BTI;
> +
> +  /* Setup memory tagging support if the HW and kernel support it, and if
> +     the user has requested it.  */
> +  cpu_features->mte_state = 0;
> +
> +#ifdef _LIBC_MTAG
> +# if HAVE_TUNABLES
> +  int mte_state = TUNABLE_GET (glibc, memtag, enable, unsigned, 0);
> +  cpu_features->mte_state = (GLRO (dl_hwcap2) & HWCAP2_MTE) ? mte_state : 0;
> +  /* If we lack the MTE feature, disable the tunable, since it will
> +     otherwise cause instructions that won't run on this CPU to be used.  */
> +  TUNABLE_SET (glibc, memtag, enable, unsigned, cpu_features->mte_state);
> +# endif
> +
> +  /* For now, disallow tag 0, so that we can clearly see when tagged
> +     addresses are being allocated.  */
> +  if (cpu_features->mte_state & 2)
> +    __prctl (PR_SET_TAGGED_ADDR_CTRL,
> +	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC
> +	      | (0xfffe << PR_MTE_TAG_SHIFT)),

Couldn't this magic number become a macro too?

> +	     0, 0, 0);
> +  else if (cpu_features->mte_state)
> +    __prctl (PR_SET_TAGGED_ADDR_CTRL,
> +	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC
> +	      | (0xfffe << PR_MTE_TAG_SHIFT)),

Likewise.

> +	     0, 0, 0);
> +#endif
>  }
> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> index 00a4d0c8e7..838d5c9aba 100644
> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
> @@ -70,6 +70,7 @@ struct cpu_features
>    uint64_t midr_el1;
>    unsigned zva_size;
>    bool bti;
> +  unsigned mte_state;

This could be just a byte unless you foresee expanding the MTE flags 
beyond the 8 bits you've specified in the tunables.

>  };
>  
>  #endif /* _CPU_FEATURES_AARCH64_H  */
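
For context on how the new field is meant to be consumed, here is an
editorial sketch modelled on the existing aarch64 multiarch resolvers.
The commit message anticipates this use; the __memcpy_* variant names
below are hypothetical:

    extern __typeof (memcpy) __memcpy_generic attribute_hidden;
    extern __typeof (memcpy) __memcpy_mte_safe attribute_hidden;

    /* Bind memcpy to an MTE-safe variant whenever tagging is live.  */
    libc_ifunc (memcpy,
                (GLRO (dl_aarch64_cpu_features).mte_state != 0
                 ? __memcpy_mte_safe
                 : __memcpy_generic));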
  
Richard Earnshaw Nov. 25, 2020, 4:06 p.m. UTC | #4
On 25/11/2020 15:34, Siddhesh Poyarekar wrote:
> On 11/23/20 9:12 PM, Richard Earnshaw via Libc-alpha wrote:
>>
>> Add various defines and stubs for enabling MTE on AArch64 sysv-like
>> systems such as Linux.  The HWCAP feature bit is copied over in the
>> same way as other feature bits.  Similarly we add a new wrapper header
>> for mman.h to define the PROT_MTE flag that can be used with mmap and
>> related functions.
>>
>> We add a new field to struct cpu_features that can be used, for
>> example, to check whether or not certain ifunc'd routines should be
>> bound to MTE-safe versions.
>>
>> Finally, if we detect that MTE should be enabled (i.e. via the glibc
>> tunable), we enable MTE during startup as required.
>> ---
>>   sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h  |  1 +
>>   sysdeps/unix/sysv/linux/aarch64/bits/mman.h   |  7 +++++
>>   .../unix/sysv/linux/aarch64/cpu-features.c    | 28 +++++++++++++++++++
>>   .../unix/sysv/linux/aarch64/cpu-features.h    |  1 +
>>   4 files changed, 37 insertions(+)
>>
>> diff --git a/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
>> b/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
>> index af90d8a626..389852f1d9 100644
>> --- a/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
>> +++ b/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
>> @@ -73,3 +73,4 @@
>>  #define HWCAP2_DGH        (1 << 15)
>>  #define HWCAP2_RNG        (1 << 16)
>>  #define HWCAP2_BTI        (1 << 17)
>> +#define HWCAP2_MTE        (1 << 18)
>> diff --git a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
>> b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
>> index ecae046344..3658b958b5 100644
>> --- a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
>> +++ b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
>> @@ -1,4 +1,5 @@
>>  /* Definitions for POSIX memory map interface.  Linux/AArch64 version.
>> +
> 
> Unnecessary newline.  I spotted a couple in 3/8 as well.
> 
>>     Copyright (C) 2020 Free Software Foundation, Inc.
>>     This file is part of the GNU C Library.
>>  
>> @@ -25,6 +26,12 @@
>>  
>>  #define PROT_BTI    0x10
>>  
>> +/* The following definitions basically come from the kernel headers.
>> +   But the kernel header is not namespace clean.  */
>> +
>> +/* Other flags.  */
>> +#define PROT_MTE    0x20        /* Normal Tagged mapping.  */
>> +
>>  #include <bits/mman-map-flags-generic.h>
>>  
>>  /* Include generic Linux declarations.  */
>> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
>> b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
>> index b9ab827aca..aa4a82c6e8 100644
>> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
>> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
>> @@ -19,6 +19,7 @@
>>  #include <cpu-features.h>
>>  #include <sys/auxv.h>
>>  #include <elf/dl-hwcaps.h>
>> +#include <sys/prctl.h>
>>  
>>  #define DCZID_DZP_MASK (1 << 4)
>>  #define DCZID_BS_MASK (0xf)
>> @@ -86,4 +87,31 @@ init_cpu_features (struct cpu_features *cpu_features)
>>  
>>    /* Check if BTI is supported.  */
>>    cpu_features->bti = GLRO (dl_hwcap2) & HWCAP2_BTI;
>> +
>> +  /* Setup memory tagging support if the HW and kernel support it,
>> and if
>> +     the user has requested it.  */
>> +  cpu_features->mte_state = 0;
>> +
>> +#ifdef _LIBC_MTAG
>> +# if HAVE_TUNABLES
>> +  int mte_state = TUNABLE_GET (glibc, memtag, enable, unsigned, 0);
>> +  cpu_features->mte_state = (GLRO (dl_hwcap2) & HWCAP2_MTE) ?
>> mte_state : 0;
>> +  /* If we lack the MTE feature, disable the tunable, since it will
>> +     otherwise cause instructions that won't run on this CPU to be
>> used.  */
>> +  TUNABLE_SET (glibc, memtag, enable, unsigned,
>> cpu_features->mte_state);
>> +# endif
>> +
>> +  /* For now, disallow tag 0, so that we can clearly see when tagged
>> +     addresses are being allocated.  */
>> +  if (cpu_features->mte_state & 2)
>> +    __prctl (PR_SET_TAGGED_ADDR_CTRL,
>> +         (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC
>> +          | (0xfffe << PR_MTE_TAG_SHIFT)),
> 
> Couldn't this magic number become a macro too?

Well, I could rewrite (0xfffe << PR_MTE_TAG_SHIFT) as

  (PR_MTE_TAG_MASK & ~(1UL << PR_MTE_TAG_SHIFT))

but I'm not entirely convinced that's a lot more understandable: it's
just a bit-mask of tags that can be enabled.

> 
>> +         0, 0, 0);
>> +  else if (cpu_features->mte_state)
>> +    __prctl (PR_SET_TAGGED_ADDR_CTRL,
>> +         (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC
>> +          | (0xfffe << PR_MTE_TAG_SHIFT)),
> 
> Likewise.
> 
>> +         0, 0, 0);
>> +#endif
>>  }
>> diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
>> b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
>> index 00a4d0c8e7..838d5c9aba 100644
>> --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
>> +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
>> @@ -70,6 +70,7 @@ struct cpu_features
>>    uint64_t midr_el1;
>>    unsigned zva_size;
>>    bool bti;
>> +  unsigned mte_state;
> 
> This could be just a byte unless you foresee expanding the MTE flags
> beyond the 8 bits you've specified in the tunables.

I don't know how stable this features structure has to be.  If it has to
remain fixed in layout forever (append-only?), then I think I'd rather
keep it as an unsigned to allow for more uses of the tunable as the need
arises.  If it's acceptable to just change the layout arbitrarily
between glibc releases, then using a byte for now would be fine.

> 
>>  };
>>  
>>  #endif /* _CPU_FEATURES_AARCH64_H  */
> 
> 

R.
  
Siddhesh Poyarekar Nov. 25, 2020, 4:20 p.m. UTC | #5
On 11/25/20 9:36 PM, Richard Earnshaw wrote:
> Well, I could rewrite (0xfffe << PR_MTE_TAG_SHIFT) as
> 
>    (PR_MTE_TAG_MASK & ~(1UL << PR_MTE_TAG_SHIFT))
> 
> but I'm not entirely convinced that's a lot more understandable: it's
> just a bit-mask of tags that can be enabled.

It isn't; I think what would work is a single macro that describes what
(PR_MTE_TAG_MASK & ~(1UL << PR_MTE_TAG_SHIFT)) is, e.g. DISALLOW_TAG_0,
assuming that that's the intention, since I can't tell what it's for at
the moment.

> I don't know how stable this features structure has to be.  If it has to
> remain fixed in layout forever (append-only?), then I think I'd rather
> keep it as an unsigned to allow for more uses of the tunable as the need
> arises.  If it's acceptable to just change the layout arbitrarily
> between glibc releases, then using a byte for now would be fine.

It's an internal structure, so we could do whatever we want with it.

Siddhesh
  
Siddhesh Poyarekar Nov. 25, 2020, 4:23 p.m. UTC | #6
On 11/25/20 9:50 PM, Siddhesh Poyarekar wrote:
> On 11/25/20 9:36 PM, Richard Earnshaw wrote:
>> Well, I could rewrite (0xfffe << PR_MTE_TAG_SHIFT) as
>>
>>    (PR_MTE_TAG_MASK & ~(1UL << PR_MTE_TAG_SHIFT))
>>
>> but I'm not entirely convinced that's a lot more understandable: it's
>> just a bit-mask of tags that can be enabled.
> 
> It isn't; I think what would work is a single macro that describes what
> (PR_MTE_TAG_MASK & ~(1UL << PR_MTE_TAG_SHIFT)) is, e.g. DISALLOW_TAG_0,
> assuming that that's the intention, since I can't tell what it's for at
> the moment.

Of course, I should have read your response properly before suggesting; 
ALLOWED_TAGS_BITMASK should be intuitive enough as long as one isn't 
talking to their kid while reading code.

Siddhesh
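
As a worked check of the constant under discussion (editorial; the uapi
values PR_MTE_TAG_SHIFT == 3 and PR_MTE_TAG_MASK == (0xffffUL << 3) are
taken from Linux's <linux/prctl.h>):

    0xfffe << PR_MTE_TAG_SHIFT                    == 0x7fff0
    PR_MTE_TAG_MASK & ~(1UL << PR_MTE_TAG_SHIFT)  == 0x7fff0

Bit N of the 16-bit field permits tag N for random-tag generation, so
both spellings allow tags 1-15 and exclude tag 0, matching the
"disallow tag 0" comment in the patch.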
  

Patch

diff --git a/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h b/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
index af90d8a626..389852f1d9 100644
--- a/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
+++ b/sysdeps/unix/sysv/linux/aarch64/bits/hwcap.h
@@ -73,3 +73,4 @@ 
 #define HWCAP2_DGH		(1 << 15)
 #define HWCAP2_RNG		(1 << 16)
 #define HWCAP2_BTI		(1 << 17)
+#define HWCAP2_MTE		(1 << 18)
diff --git a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
index ecae046344..3658b958b5 100644
--- a/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
+++ b/sysdeps/unix/sysv/linux/aarch64/bits/mman.h
@@ -1,4 +1,5 @@ 
 /* Definitions for POSIX memory map interface.  Linux/AArch64 version.
+
    Copyright (C) 2020 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
@@ -25,6 +26,12 @@ 
 
 #define PROT_BTI	0x10
 
+/* The following definitions basically come from the kernel headers.
+   But the kernel header is not namespace clean.  */
+
+/* Other flags.  */
+#define PROT_MTE	0x20		/* Normal Tagged mapping.  */
+
 #include <bits/mman-map-flags-generic.h>
 
 /* Include generic Linux declarations.  */
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
index b9ab827aca..aa4a82c6e8 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
@@ -19,6 +19,7 @@ 
 #include <cpu-features.h>
 #include <sys/auxv.h>
 #include <elf/dl-hwcaps.h>
+#include <sys/prctl.h>
 
 #define DCZID_DZP_MASK (1 << 4)
 #define DCZID_BS_MASK (0xf)
@@ -86,4 +87,31 @@  init_cpu_features (struct cpu_features *cpu_features)
 
   /* Check if BTI is supported.  */
   cpu_features->bti = GLRO (dl_hwcap2) & HWCAP2_BTI;
+
+  /* Setup memory tagging support if the HW and kernel support it, and if
+     the user has requested it.  */
+  cpu_features->mte_state = 0;
+
+#ifdef _LIBC_MTAG
+# if HAVE_TUNABLES
+  int mte_state = TUNABLE_GET (glibc, memtag, enable, unsigned, 0);
+  cpu_features->mte_state = (GLRO (dl_hwcap2) & HWCAP2_MTE) ? mte_state : 0;
+  /* If we lack the MTE feature, disable the tunable, since it will
+     otherwise cause instructions that won't run on this CPU to be used.  */
+  TUNABLE_SET (glibc, memtag, enable, unsigned, cpu_features->mte_state);
+# endif
+
+  /* For now, disallow tag 0, so that we can clearly see when tagged
+     addresses are being allocated.  */
+  if (cpu_features->mte_state & 2)
+    __prctl (PR_SET_TAGGED_ADDR_CTRL,
+	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC
+	      | (0xfffe << PR_MTE_TAG_SHIFT)),
+	     0, 0, 0);
+  else if (cpu_features->mte_state)
+    __prctl (PR_SET_TAGGED_ADDR_CTRL,
+	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC
+	      | (0xfffe << PR_MTE_TAG_SHIFT)),
+	     0, 0, 0);
+#endif
 }
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
index 00a4d0c8e7..838d5c9aba 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
@@ -70,6 +70,7 @@  struct cpu_features
   uint64_t midr_el1;
   unsigned zva_size;
   bool bti;
+  unsigned mte_state;
 };
 
 #endif /* _CPU_FEATURES_AARCH64_H  */
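
As a usage note (editorial, assuming the glibc.memtag.enable tunable
introduced earlier in this series: per the init_cpu_features logic
above, any nonzero value enables tagging and bit 1 selects synchronous
tag-check faults):

    GLIBC_TUNABLES=glibc.memtag.enable=1 ./app   # MTE on, asynchronous faults
    GLIBC_TUNABLES=glibc.memtag.enable=3 ./app   # MTE on, synchronous faults

On hardware or kernels without HWCAP2_MTE the code forces the tunable
back to 0, so the same binary still runs untagged.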