[v12,03/17] Add Linux 'extra TLS'

Message ID 20240801-rseq-abi-v11-split-v12-3-0e87479dddc0@efficios.com
State New
Series Add rseq extensible ABI support

Checks

Context                           Check    Description
redhat-pt-bot/TryBot-apply_patch  success  Patch applied to master at the time it was sent

Commit Message

Michael Jeanson Aug. 5, 2024, 8:48 p.m. UTC
Add the Linux implementation of 'extra TLS', which allocates space for the
rseq area at the end of the TLS blocks, in allocation order.

Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
---
 sysdeps/unix/sysv/linux/dl-extra_tls.h | 76 ++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)
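
To illustrate the layout described above (this is not part of the patch), the
sketch below shows the basic arithmetic for appending an extra block after the
regular TLS blocks: round the current end of the TLS blocks up to the block's
alignment, then reserve its size.  All identifiers and values here are made up
for illustration; the real bookkeeping lives in the dynamic linker's TLS
layout code.

/* Hypothetical, self-contained illustration of placing an "extra TLS" block
   at the end of the regular TLS blocks; none of these identifiers are glibc
   internals.  */
#include <stdio.h>
#include <stddef.h>

/* Round VALUE up to the next multiple of ALIGN (a power of two).  */
static size_t
align_up (size_t value, size_t align)
{
  return (value + align - 1) & ~(align - 1);
}

int
main (void)
{
  size_t tls_blocks_end = 200;  /* Pretend combined size of the TLS blocks.  */
  size_t extra_size = 32;       /* As if from _dl_extra_tls_get_size ().  */
  size_t extra_align = 32;      /* As if from _dl_extra_tls_get_align ().  */

  /* Place the extra block after the last TLS block, honouring its
     alignment, and grow the overall TLS allocation accordingly.  */
  size_t extra_offset = align_up (tls_blocks_end, extra_align);
  size_t total_size = extra_offset + extra_size;

  printf ("extra block at offset %zu, total TLS size %zu\n",
          extra_offset, total_size);
  return 0;
}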
  

Patch

diff --git a/sysdeps/unix/sysv/linux/dl-extra_tls.h b/sysdeps/unix/sysv/linux/dl-extra_tls.h
new file mode 100644
index 0000000000..d00207a76e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/dl-extra_tls.h
@@ -0,0 +1,76 @@ 
+/* Extra TLS block utils for the dynamic linker.  Linux version.
+   Copyright (C) 2024 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _DL_EXTRA_TLS_H
+#define _DL_EXTRA_TLS_H 1
+
+#include <stddef.h>
+#include <sys/rseq.h>
+#include <rseq-internal.h>
+#include <elf/dl-tunables.h>
+
+/* Returns the size of the extra TLS block.  */
+static inline size_t
+_dl_extra_tls_get_size (void)
+{
+  bool do_rseq = true;
+  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
+  if (do_rseq)
+    {
+      /* Make sure the rseq area size is at least the minimum ABI size and a
+         multiple of the requested alignment.  */
+      return roundup (MAX (_rseq_size, RSEQ_AREA_SIZE_INITIAL), _rseq_align);
+    }
+
+  /* Even when disabled by the tunable, an rseq area will be allocated to
+     allow application code to test the registration status with
+     'rseq->cpu_id >= 0'.  Default to the rseq ABI minimum size; this
+     ensures we don't use more TLS than necessary.  */
+  return RSEQ_AREA_SIZE_INITIAL;
+}
+
+/* Returns the alignment requirements of the extra TLS block.  */
+static inline size_t
+_dl_extra_tls_get_align (void)
+{
+  bool do_rseq = true;
+  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
+  if (do_rseq)
+    {
+      return _rseq_align;
+    }
+
+  /* Even when disabled by the tunable, an rseq area will be allocated to
+     allow application code to test the registration status with
+     'rseq->cpu_id >= 0'.  Default to the rseq ABI minimum alignment; this
+     ensures we don't use more TLS than necessary.  */
+  return RSEQ_MIN_ALIGN;
+}
+
+/* Record the offset of the extra TLS block from the thread pointer.  */
+static inline void
+_dl_extra_tls_set_offset (ptrdiff_t tls_offset)
+{
+#ifdef RSEQ_SIG
+  /* Not all targets support __thread_pointer, so set __rseq_offset only
+     when RSEQ_SIG is defined and an rseq registration may have happened.  */
+  _rseq_offset = tls_offset;
+#endif
+}
+
+#endif
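
The comments above note that an rseq area is allocated even when registration
is disabled, so applications can test the registration status.  A minimal
sketch of such a check, assuming a glibc that exports __rseq_offset through
<sys/rseq.h> (glibc 2.35 and later) and a compiler that provides
__builtin_thread_pointer for the target:

/* Minimal sketch, not part of the patch: locate the rseq area via the public
   __rseq_offset symbol and test whether registration succeeded.  Assumes
   <sys/rseq.h> declares __rseq_offset and that __builtin_thread_pointer is
   available on this target.  */
#include <stdio.h>
#include <stdint.h>
#include <sys/rseq.h>

int
main (void)
{
  struct rseq *rs = (struct rseq *) ((char *) __builtin_thread_pointer ()
                                     + __rseq_offset);

  /* cpu_id holds a negative value while unregistered, so a signed
     comparison distinguishes the two states.  */
  if ((int32_t) rs->cpu_id >= 0)
    printf ("rseq registered, current CPU %d\n", (int) rs->cpu_id);
  else
    printf ("rseq not registered\n");
  return 0;
}

With registration disabled through the tunable (for example
GLIBC_TUNABLES=glibc.pthread.rseq=0), the area is still allocated at the
recorded offset and the check above simply reports that rseq is not
registered.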