aligned_alloc: conform to C17

Message ID xnv8j02zji.fsf@greed.delorie.com
State Superseded
Series aligned_alloc: conform to C17

Checks

Context Check Description
dj/TryBot-apply_patch success Patch applied to master at the time it was sent
dj/TryBot-32bit fail Patch series failed to build

Commit Message

DJ Delorie March 16, 2023, 8:48 p.m. UTC
  References:
https://patchwork.sourceware.org/project/glibc/patch/33ec9e0c1e587813b90e8aa771c2c8e6e379dd48.camel@posteo.net/
https://sourceware.org/bugzilla/show_bug.cgi?id=20137
https://sourceware.org/pipermail/libc-alpha/2023-February/145858.html

The memory.texi portion matches Martin's proposed patch.

man page portion, quoted to avoid CI/CD issues (I can send an official
patch separately after the glibc patch is applied):

> diff --git a/man3/posix_memalign.3 b/man3/posix_memalign.3
> index f5d6618b7..a73ff0421 100644
> --- a/man3/posix_memalign.3
> +++ b/man3/posix_memalign.3
> @@ -91,9 +91,8 @@ The function
>  is the same as
>  .BR memalign (),
>  except for the added restriction that
> -.I size
> -should be a multiple of
> -.IR alignment .
> +.I alignment
> +must be a power of two.
>  .PP
>  The obsolete function
>  .BR valloc ()


From 4767e0e764e1a7a5ef01e303f503036379dd42c5 Mon Sep 17 00:00:00 2001
From: DJ Delorie <dj@redhat.com>
Date: Thu, 16 Mar 2023 01:33:41 -0400
Subject: aligned_alloc: conform to C17

This patch adds strict checking for power-of-two alignments
in aligned_alloc(), and updates the manual accordingly.
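
For illustration (not part of the patch), a throwaway test along these
lines should now fail with EINVAL instead of succeeding via the old
memalign alias:

  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>

  int
  main (void)
  {
    /* 24 is not a power of two, so C17 requires aligned_alloc to fail;
       with this patch glibc also sets errno to EINVAL.  */
    errno = 0;
    void *p = aligned_alloc (24, 100);
    if (p == NULL && errno == EINVAL)
      puts ("invalid alignment rejected");
    else
      puts ("invalid alignment accepted (pre-patch behavior)");
    free (p);
    return 0;
  }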
  

Comments

Adhemerval Zanella March 16, 2023, 9 p.m. UTC | #1
On 16/03/23 17:48, DJ Delorie via Libc-alpha wrote:
> References:
> https://patchwork.sourceware.org/project/glibc/patch/33ec9e0c1e587813b90e8aa771c2c8e6e379dd48.camel@posteo.net/
> https://sourceware.org/bugzilla/show_bug.cgi?id=20137
> https://sourceware.org/pipermail/libc-alpha/2023-February/145858.html
> 
> The memory.texi portion matches Martin's proposed patch.
> 
> man page portion, quoted to avoid CI/CD issues (I can send an official
> patch separately after the glibc patch is applied):
> 
>> diff --git a/man3/posix_memalign.3 b/man3/posix_memalign.3
>> index f5d6618b7..a73ff0421 100644
>> --- a/man3/posix_memalign.3
>> +++ b/man3/posix_memalign.3
>> @@ -91,9 +91,8 @@ The function
>>  is the same as
>>  .BR memalign (),
>>  except for the added restriction that
>> -.I size
>> -should be a multiple of
>> -.IR alignment .
>> +.I alignment
>> +must be a power of two.
>>  .PP
>>  The obsolete function
>>  .BR valloc ()
> 
> 
> From 4767e0e764e1a7a5ef01e303f503036379dd42c5 Mon Sep 17 00:00:00 2001
> From: DJ Delorie <dj@redhat.com>
> Date: Thu, 16 Mar 2023 01:33:41 -0400
> Subject: aligned_alloc: conform to C17
> 
> This patch adds strict checking for power-of-two alignments
> in aligned_alloc(), and updates the manual accordingly.

Hi D.J, this patch does not build correctly [1].  Maybe you sent it too soon?

[1] https://patchwork.sourceware.org/project/glibc/patch/xnv8j02zji.fsf@greed.delorie.com/

> 
> diff --git a/malloc/Versions b/malloc/Versions
> index c763395c6d..28f41a94f3 100644
> --- a/malloc/Versions
> +++ b/malloc/Versions
> @@ -67,6 +67,9 @@ libc {
>    GLIBC_2.33 {
>      mallinfo2;
>    }
> +  GLIBC_2.38 {
> +    __libc_aligned_alloc;
> +  }
>    GLIBC_PRIVATE {
>      # Internal startup hook for libpthread.
>      __libc_malloc_pthread_startup;

How is it actually used? There is no redirection in the header if -std=c17 is used,
nor any abilist update.

> diff --git a/malloc/malloc-debug.c b/malloc/malloc-debug.c
> index 3867d15698..4d2ec04a1a 100644
> --- a/malloc/malloc-debug.c
> +++ b/malloc/malloc-debug.c
> @@ -268,7 +268,8 @@ __debug_realloc (void *oldmem, size_t bytes)
>  strong_alias (__debug_realloc, realloc)
>  
>  static void *
> -_debug_mid_memalign (size_t alignment, size_t bytes, const void *address)
> +_debug_mid_memalign (size_t alignment, size_t bytes, const void *address,
> +		     int check_alignment)
>  {
>    void *(*hook) (size_t, size_t, const void *) =
>      atomic_forced_read (__memalign_hook);
> @@ -281,9 +282,15 @@ _debug_mid_memalign (size_t alignment, size_t bytes, const void *address)
>    if ((!__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK)
>         || !memalign_mcheck_before (alignment, &bytes, &victim)))
>      {
> -      victim = (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK)
> -		? memalign_check (alignment, bytes)
> -		: __libc_memalign (alignment, bytes));
> +      if (check_alignment && !powerof2 (alignment))
> +	{
> +	  __set_errno (EINVAL);
> +	  victim = NULL;
> +	}
> +      else
> +	victim = (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK)
> +		  ? memalign_check (alignment, bytes)
> +		  : __libc_memalign (alignment, bytes));
>      }
>    if (__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK) && victim != NULL)
>      victim = memalign_mcheck_after (victim, alignment, orig_bytes);
> @@ -296,10 +303,15 @@ _debug_mid_memalign (size_t alignment, size_t bytes, const void *address)
>  static void *
>  __debug_memalign (size_t alignment, size_t bytes)
>  {
> -  return _debug_mid_memalign (alignment, bytes, RETURN_ADDRESS (0));
> +  return _debug_mid_memalign (alignment, bytes, RETURN_ADDRESS (0), 0);
>  }
>  strong_alias (__debug_memalign, memalign)
> -strong_alias (__debug_memalign, aligned_alloc)
> +static void *
> +__debug_aligned_alloc (size_t alignment, size_t bytes)
> +{
> +  return _debug_mid_memalign (alignment, bytes, RETURN_ADDRESS (0), 1);
> +}
> +strong_alias (__debug_aligned_alloc, aligned_alloc)
>  
>  static void *
>  __debug_pvalloc (size_t bytes)
> diff --git a/malloc/malloc.c b/malloc/malloc.c
> index 76c50e3f58..09619ed168 100644
> --- a/malloc/malloc.c
> +++ b/malloc/malloc.c
> @@ -656,6 +656,9 @@ libc_hidden_proto (__libc_realloc)
>  void*  __libc_memalign(size_t, size_t);
>  libc_hidden_proto (__libc_memalign)
>  
> +void * __libc_aligned_alloc (size_t alignment, size_t bytes);
> +libc_hidden_proto (__libc_aligned_alloc)
> +
>  /*
>    valloc(size_t n);
>    Equivalent to memalign(pagesize, n), where pagesize is the page
> @@ -3509,6 +3512,27 @@ __libc_memalign (size_t alignment, size_t bytes)
>    void *address = RETURN_ADDRESS (0);
>    return _mid_memalign (alignment, bytes, address);
>  }
> +libc_hidden_def (__libc_memalign)
> +
> +/* For ISO C11.  */
> +void *
> +__libc_aligned_alloc (size_t alignment, size_t bytes)
> +{
> +  if (!__malloc_initialized)
> +    ptmalloc_init ();
> +
> +  /* Similar to memalign, but ISO C17 requires an error for invalid
> +     alignments.  Valid alignments are non-negative powers of two.  */
> +  if (!powerof2 (alignment))
> +    {
> +      __set_errno (EINVAL);
> +      return 0;
> +    }
> +
> +  void *address = RETURN_ADDRESS (0);
> +  return _mid_memalign (alignment, bytes, address);
> +}
> +libc_hidden_def (__libc_aligned_alloc)
>  
>  static void *
>  _mid_memalign (size_t alignment, size_t bytes, void *address)
> @@ -3567,9 +3591,6 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
>            ar_ptr == arena_for_chunk (mem2chunk (p)));
>    return tag_new_usable (p);
>  }
> -/* For ISO C11.  */
> -weak_alias (__libc_memalign, aligned_alloc)
> -libc_hidden_def (__libc_memalign)
>  
>  void *
>  __libc_valloc (size_t bytes)
> @@ -5903,6 +5924,7 @@ weak_alias (__libc_mallinfo, mallinfo)
>  strong_alias (__libc_mallinfo2, __mallinfo2)
>  weak_alias (__libc_mallinfo2, mallinfo2)
>  strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
> +weak_alias (__libc_aligned_alloc, aligned_alloc)
>  
>  weak_alias (__malloc_stats, malloc_stats)
>  weak_alias (__malloc_usable_size, malloc_usable_size)
> diff --git a/manual/memory.texi b/manual/memory.texi
> index 9d3398a326..8952ff2bfa 100644
> --- a/manual/memory.texi
> +++ b/manual/memory.texi
> @@ -995,7 +995,7 @@ power of two than that, use @code{aligned_alloc} or @code{posix_memalign}.
>  @c Alias to memalign.
>  The @code{aligned_alloc} function allocates a block of @var{size} bytes whose
>  address is a multiple of @var{alignment}.  The @var{alignment} must be a
> -power of two and @var{size} must be a multiple of @var{alignment}.
> +power of two.
>  
>  The @code{aligned_alloc} function returns a null pointer on error and sets
>  @code{errno} to one of the following values:
>
  
Paul Eggert March 16, 2023, 10 p.m. UTC | #2
On 3/16/23 13:48, DJ Delorie via Libc-alpha wrote:
> +  /* Similar to memalign, but ISO C17 requires an error for invalid
> +     alignments.  Valid alignments are non-negative powers of two.  */
> +  if (!powerof2 (alignment))

powerof2 (0) == 1, unfortunately. Does the C standard let aligned_alloc 
(alignment, size) succeed when ALIGNMENT is zero? I think not, as zero 
is 2**-Infinity, and -Infinity is not non-negative. So that line should 
be changed to something like 'if (!powerof2 (alignment) || alignment == 0)'.
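
The macro is essentially the usual bit trick (this mirrors the
<sys/param.h> definition), so zero slips through:

  /* Mirrors glibc's powerof2 macro from <sys/param.h>.  */
  #define powerof2(x) ((((x) - 1) & (x)) == 0)

  /* powerof2 (0) expands to (((0) - 1) & (0)) == 0, which is true,
     so a zero alignment passes the proposed check as written.  */
  _Static_assert (powerof2 (0) == 1, "zero slips through powerof2");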

There may be other occurrences of this issue in Glibc already; I haven't 
checked.
  
DJ Delorie March 17, 2023, midnight UTC | #3
Adhemerval Zanella Netto via Libc-alpha <libc-alpha@sourceware.org>
writes:
> Hi D.J, this patch does not build correctly [1].  Maybe you sent it too soon?

Sigh, fixing...

also, it's DJ not D.J.

>> +  GLIBC_2.38 {
>> +    __libc_aligned_alloc;
>> +  }
> How is it actually used? There is no redirection in the header if -std=c17 is used,
> nor any abilist update.

aligned_alloc() used to be a weak alias to __libc_memalign, but since they
now do different things, I changed it to a weak alias to
__libc_aligned_alloc.  I added the new symbol so that it is exported the
same way as the old one.

Ug, I need to update all the .abilist files too...
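
(For each target that presumably means adding a line like this to its
libc.abilist, assuming the symbol stays at GLIBC_2.38:)

  GLIBC_2.38 __libc_aligned_alloc F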

Paul Eggert <eggert@cs.ucla.edu> writes:
> powerof2 (0) == 1, unfortunately. Does the C standard let aligned_alloc 

_mid_memalign enforces a minimum alignment, so this isn't an issue, other
than the pedantry of whether 0 is an ISO-valid alignment.

I suspect fixing the macro might be more generally useful than fixing
this edge case, but I haven't looked for any potential side-effects.
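
Something like this, presumably (just a sketch; like the current macro
it evaluates its argument twice):

  /* Hypothetical replacement: true only for nonzero powers of two.  */
  #define powerof2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))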

As for "non-negative alignment" I assume they mean "passing a negative
number as `alignment' to aligned_alloc()", not referring to the power of
two itself.

 "Every valid alignment value shall be a nonnegative integral power of
 two."

but alignments are also represented by type size_t, which is integral
and unsigned, so the "nonnegative" in "nonnegative integral power of
two" has no meaningful interpretation other than the pedantic
align=zero case.

The only other hint is this one:

 "The types char, ... shall have the weakest alignment requirement."

That's align=1, implying that align=0 cannot be a valid alignment.
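
A compiler will confirm the weakest case directly:

  /* char has the weakest alignment requirement, which is 1, so the
     smallest valid alignment argument is 1, not 0.  */
  _Static_assert (_Alignof (char) == 1, "char has the weakest alignment");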
  
Paul Eggert March 17, 2023, 6:33 a.m. UTC | #4
On 2023-03-16 17:00, DJ Delorie wrote:
> Paul Eggert <eggert@cs.ucla.edu> writes:
>> powerof2 (0) == 1, unfortunately....
> _mid_memalign enforces a minimum alignment so this isn't an issue, other
> than the pedantry of whether 0 is an iso-valid alignment.

Yes, it's the pedantry I was worried about. (That's the main point of 
the patch anyway - namely, conforming to the standard.)


> As for "non-negative alignment" I assume they mean "passing a negative
> number as `alignment' to aligned_alloc()", not referring to the power of
> two itself.

As you mention, the C17 wording is not entirely clear, but it's
implausible that the standardizers intended 0 to be a valid alignment:
if they had intended that, they would have said so clearly.

There's also a common-sense argument: a pointer value aligned to A is in 
some sense a multiple of A. But the only multiple of zero is zero. So 
specifying an alignment of zero makes no intuitive sense, except perhaps 
for the all-bits-zero pointer.


> I suspect fixing the macro might be more generally useful than fixing
> this edge case

Although I haven't looked at this in detail, I suspect that changing the 
macro will merely slow other code down slightly. It might be better to 
leave the macro alone (perhaps changing its comment), and to fix just 
aligned_alloc.
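
That is, leave powerof2 alone and tighten only the new check in
__libc_aligned_alloc, roughly:

  /* Zero is not a valid alignment, even though powerof2 (0) is true.  */
  if (alignment == 0 || !powerof2 (alignment))
    {
      __set_errno (EINVAL);
      return NULL;
    }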
  
DJ Delorie March 17, 2023, 7:55 p.m. UTC | #5
Paul Eggert <eggert@cs.ucla.edu> writes:
> As you mention the C17 wording is not entirely clear, but it's 
> implausible that the standardizers intended 0 to be a valid alignment: 
> if they had intended that they would have said so clearly.

I think I was trying to agree with you, using more than "that makes
sense" :-)

The "char align is weakest" statement seems to be the key one.
  

Patch

diff --git a/malloc/Versions b/malloc/Versions
index c763395c6d..28f41a94f3 100644
--- a/malloc/Versions
+++ b/malloc/Versions
@@ -67,6 +67,9 @@  libc {
   GLIBC_2.33 {
     mallinfo2;
   }
+  GLIBC_2.38 {
+    __libc_aligned_alloc;
+  }
   GLIBC_PRIVATE {
     # Internal startup hook for libpthread.
     __libc_malloc_pthread_startup;
diff --git a/malloc/malloc-debug.c b/malloc/malloc-debug.c
index 3867d15698..4d2ec04a1a 100644
--- a/malloc/malloc-debug.c
+++ b/malloc/malloc-debug.c
@@ -268,7 +268,8 @@  __debug_realloc (void *oldmem, size_t bytes)
 strong_alias (__debug_realloc, realloc)
 
 static void *
-_debug_mid_memalign (size_t alignment, size_t bytes, const void *address)
+_debug_mid_memalign (size_t alignment, size_t bytes, const void *address,
+		     int check_alignment)
 {
   void *(*hook) (size_t, size_t, const void *) =
     atomic_forced_read (__memalign_hook);
@@ -281,9 +282,15 @@  _debug_mid_memalign (size_t alignment, size_t bytes, const void *address)
   if ((!__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK)
        || !memalign_mcheck_before (alignment, &bytes, &victim)))
     {
-      victim = (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK)
-		? memalign_check (alignment, bytes)
-		: __libc_memalign (alignment, bytes));
+      if (check_alignment && !powerof2 (alignment))
+	{
+	  __set_errno (EINVAL);
+	  victim = NULL;
+	}
+      else
+	victim = (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK)
+		  ? memalign_check (alignment, bytes)
+		  : __libc_memalign (alignment, bytes));
     }
   if (__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK) && victim != NULL)
     victim = memalign_mcheck_after (victim, alignment, orig_bytes);
@@ -296,10 +303,15 @@  _debug_mid_memalign (size_t alignment, size_t bytes, const void *address)
 static void *
 __debug_memalign (size_t alignment, size_t bytes)
 {
-  return _debug_mid_memalign (alignment, bytes, RETURN_ADDRESS (0));
+  return _debug_mid_memalign (alignment, bytes, RETURN_ADDRESS (0), 0);
 }
 strong_alias (__debug_memalign, memalign)
-strong_alias (__debug_memalign, aligned_alloc)
+static void *
+__debug_aligned_alloc (size_t alignment, size_t bytes)
+{
+  return _debug_mid_memalign (alignment, bytes, RETURN_ADDRESS (0), 1);
+}
+strong_alias (__debug_aligned_alloc, aligned_alloc)
 
 static void *
 __debug_pvalloc (size_t bytes)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 76c50e3f58..09619ed168 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -656,6 +656,9 @@  libc_hidden_proto (__libc_realloc)
 void*  __libc_memalign(size_t, size_t);
 libc_hidden_proto (__libc_memalign)
 
+void * __libc_aligned_alloc (size_t alignment, size_t bytes);
+libc_hidden_proto (__libc_aligned_alloc)
+
 /*
   valloc(size_t n);
   Equivalent to memalign(pagesize, n), where pagesize is the page
@@ -3509,6 +3512,27 @@  __libc_memalign (size_t alignment, size_t bytes)
   void *address = RETURN_ADDRESS (0);
   return _mid_memalign (alignment, bytes, address);
 }
+libc_hidden_def (__libc_memalign)
+
+/* For ISO C11.  */
+void *
+__libc_aligned_alloc (size_t alignment, size_t bytes)
+{
+  if (!__malloc_initialized)
+    ptmalloc_init ();
+
+  /* Similar to memalign, but ISO C17 requires an error for invalid
+     alignments.  Valid alignments are non-negative powers of two.  */
+  if (!powerof2 (alignment))
+    {
+      __set_errno (EINVAL);
+      return 0;
+    }
+
+  void *address = RETURN_ADDRESS (0);
+  return _mid_memalign (alignment, bytes, address);
+}
+libc_hidden_def (__libc_aligned_alloc)
 
 static void *
 _mid_memalign (size_t alignment, size_t bytes, void *address)
@@ -3567,9 +3591,6 @@  _mid_memalign (size_t alignment, size_t bytes, void *address)
           ar_ptr == arena_for_chunk (mem2chunk (p)));
   return tag_new_usable (p);
 }
-/* For ISO C11.  */
-weak_alias (__libc_memalign, aligned_alloc)
-libc_hidden_def (__libc_memalign)
 
 void *
 __libc_valloc (size_t bytes)
@@ -5903,6 +5924,7 @@  weak_alias (__libc_mallinfo, mallinfo)
 strong_alias (__libc_mallinfo2, __mallinfo2)
 weak_alias (__libc_mallinfo2, mallinfo2)
 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
+weak_alias (__libc_aligned_alloc, aligned_alloc)
 
 weak_alias (__malloc_stats, malloc_stats)
 weak_alias (__malloc_usable_size, malloc_usable_size)
diff --git a/manual/memory.texi b/manual/memory.texi
index 9d3398a326..8952ff2bfa 100644
--- a/manual/memory.texi
+++ b/manual/memory.texi
@@ -995,7 +995,7 @@  power of two than that, use @code{aligned_alloc} or @code{posix_memalign}.
 @c Alias to memalign.
 The @code{aligned_alloc} function allocates a block of @var{size} bytes whose
 address is a multiple of @var{alignment}.  The @var{alignment} must be a
-power of two and @var{size} must be a multiple of @var{alignment}.
+power of two.
 
 The @code{aligned_alloc} function returns a null pointer on error and sets
 @code{errno} to one of the following values: