crypt: Get rid of alloca usage in __sha256_crypt_r

Message ID 20230918202631.2828791-1-josimmon@redhat.com
State Superseded
Headers
Series crypt: Get rid of alloca usage in __sha256_crypt_r |

Checks

Context Check Description
redhat-pt-bot/TryBot-apply_patch success Patch applied to master at the time it was sent
redhat-pt-bot/TryBot-32bit success Build for i686
linaro-tcwg-bot/tcwg_glibc_build--master-arm success Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-arm success Testing passed
linaro-tcwg-bot/tcwg_glibc_build--master-aarch64 success Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-aarch64 success Testing passed

Commit Message

Joe Simmons-Talbott Sept. 18, 2023, 8:26 p.m. UTC
  Replace alloca usage with scratch_buffers to avoid potential stack
overflow.
---
 crypt/sha256-crypt.c | 65 ++++++++++++++++++++++++++++----------------
 1 file changed, 42 insertions(+), 23 deletions(-)
  

Comments

Adhemerval Zanella Netto Sept. 19, 2023, 4:18 p.m. UTC | #1
On 18/09/23 17:26, Joe Simmons-Talbott wrote:
> Replace alloca usage with scratch_buffers to avoid potential stack
> overflow.

Zack,

Would it be possible to import the libxcrypt code for sha256/sha512/md5 that
already does not use alloca or malloc? I think that would be better than
just touching code that is already deprecated and meant to be used as a
fallback only.

As a side note, do we still need the USE_NSS/fips support on this now
that it is not built by default?

> ---
>  crypt/sha256-crypt.c | 65 ++++++++++++++++++++++++++++----------------
>  1 file changed, 42 insertions(+), 23 deletions(-)
> 
> diff --git a/crypt/sha256-crypt.c b/crypt/sha256-crypt.c
> index e90eb590bb..d686c67cd5 100644
> --- a/crypt/sha256-crypt.c
> +++ b/crypt/sha256-crypt.c
> @@ -18,6 +18,7 @@
>  
>  #include <assert.h>
>  #include <errno.h>
> +#include <scratch_buffer.h>
>  #include <stdbool.h>
>  #include <stdlib.h>
>  #include <string.h>
> @@ -118,6 +119,14 @@ __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
>    size_t alloca_used = 0;
>    char *free_key = NULL;
>    char *free_pbytes = NULL;
> +  struct scratch_buffer sbuf;
> +  scratch_buffer_init (&sbuf);
> +  struct scratch_buffer p_bytes_buf;
> +  scratch_buffer_init (&p_bytes_buf);
> +  struct scratch_buffer salt_buf;
> +  scratch_buffer_init (&salt_buf);
> +  struct scratch_buffer s_bytes_buf;
> +  scratch_buffer_init (&s_bytes_buf);
>  
>    /* Find beginning of salt string.  The prefix should normally always
>       be present.  Just in case it is not.  */
> @@ -146,14 +155,11 @@ __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
>      {
>        char *tmp;
>  
> -      if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint32_t)))
> -	tmp = alloca_account (key_len + __alignof__ (uint32_t), alloca_used);
> -      else
> -	{
> -	  free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
> -	  if (tmp == NULL)
> -	    return NULL;
> -	}
> +      if (!scratch_buffer_set_array_size (
> +		&sbuf, 1, key_len + __alignof__ (uint32_t)))
> +        return NULL;
> +
> +      tmp = sbuf.data;
>  
>        key = copied_key =
>  	memcpy (tmp + __alignof__ (uint32_t)
> @@ -164,8 +170,14 @@ __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
>  
>    if (((uintptr_t) salt) % __alignof__ (uint32_t) != 0)
>      {
> -      char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
> -      alloca_used += salt_len + __alignof__ (uint32_t);
> +      char *tmp;
> +      if (!scratch_buffer_set_array_size (
> +		&salt_buf, 1, salt_len + __alignof__ (uint32_t)))
> +	{
> +	  scratch_buffer_free (&sbuf);
> +	  return NULL;
> +	}
> +      tmp = salt_buf.data;
>        salt = copied_salt =
>  	memcpy (tmp + __alignof__ (uint32_t)
>  		- ((uintptr_t) tmp) % __alignof__ (uint32_t),
> @@ -178,7 +190,8 @@ __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
>    NSSLOWInitContext *nss_ictx = NSSLOW_Init ();
>    if (nss_ictx == NULL)
>      {
> -      free (free_key);
> +      scratch_buffer_free (&sbuf);
> +      scratch_buffer_free (&salt_buf);
>        return NULL;
>      }
>    NSSLOWHASHContext *nss_ctx = NULL;
> @@ -243,17 +256,13 @@ __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
>    sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
>  
>    /* Create byte sequence P.  */
> -  if (__libc_use_alloca (alloca_used + key_len))
> -    cp = p_bytes = (char *) alloca (key_len);
> -  else
> +  if (!scratch_buffer_set_array_size (&p_bytes_buf, 1, key_len))
>      {
> -      free_pbytes = cp = p_bytes = (char *)malloc (key_len);
> -      if (free_pbytes == NULL)
> -	{
> -	  free (free_key);
> -	  return NULL;
> -	}
> +      scratch_buffer_free (&sbuf);
> +      scratch_buffer_free (&salt_buf);
> +      return NULL;
>      }
> +  cp = p_bytes = p_bytes_buf.data;
>  
>    for (cnt = key_len; cnt >= 32; cnt -= 32)
>      cp = mempcpy (cp, temp_result, 32);
> @@ -270,7 +279,15 @@ __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
>    sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
>  
>    /* Create byte sequence S.  */
> -  cp = s_bytes = alloca (salt_len);
> +  if (!scratch_buffer_set_array_size (&s_bytes_buf, 1, salt_len))
> +    {
> +      scratch_buffer_free (&sbuf);
> +      scratch_buffer_free (&p_bytes_buf);
> +      scratch_buffer_free (&salt_buf);
> +      return NULL;
> +    }
> +  cp = s_bytes = s_bytes_buf.data;
> +
>    for (cnt = salt_len; cnt >= 32; cnt -= 32)
>      cp = mempcpy (cp, temp_result, 32);
>    memcpy (cp, temp_result, cnt);
> @@ -381,8 +398,10 @@ __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
>    if (copied_salt != NULL)
>      explicit_bzero (copied_salt, salt_len);
>  
> -  free (free_key);
> -  free (free_pbytes);
> +  scratch_buffer_free (&sbuf);
> +  scratch_buffer_free (&p_bytes_buf);
> +  scratch_buffer_free (&salt_buf);
> +  scratch_buffer_free (&s_bytes_buf);
>    return buffer;
>  }
>
  
Zack Weinberg Sept. 20, 2023, 3:23 p.m. UTC | #2
On Tue, Sep 19, 2023, at 12:18 PM, Adhemerval Zanella Netto wrote:
> On 18/09/23 17:26, Joe Simmons-Talbott wrote:
>> Replace alloca usage with scratch_buffers to avoid potential stack
>> overflow.
> Would it be possible to import libxcrypt code for sha256/sha512/md5 that
> already does not use alloca or malloc?

I would have no objection. I thought the plan was to delete libc's crypt altogether,
though? If that's slated to happen within the next couple of releases, it might be
less churn to take Joe's patch, or just to do the deletion now.

cc:ing Bjoern Esser in case he has an opinion.

zw
  
Adhemerval Zanella Netto Sept. 21, 2023, 12:59 p.m. UTC | #3
On 20/09/23 12:23, Zack Weinberg wrote:
> On Tue, Sep 19, 2023, at 12:18 PM, Adhemerval Zanella Netto wrote:
>> On 18/09/23 17:26, Joe Simmons-Talbott wrote:
>>> Replace alloca usage with scratch_buffers to avoid potential stack
>>> overflow.
>> Would it be possible to import libxcrypt code for sha256/sha512/md5 that
>> already does not use alloca or malloc?
> 
> I would have no objection. I thought the plan was to delete libc's crypt altogether,
> though? If that's slated to happen within the next couple of releases, it might be
> less churn to take Joe's patch, or just to do the deletion now.
> 
> cc:ing Bjoern Esser in case he has an opinion.

In fact I think it is a good time to start the libcrypt removal.
  

Patch

diff --git a/crypt/sha256-crypt.c b/crypt/sha256-crypt.c
index e90eb590bb..d686c67cd5 100644
--- a/crypt/sha256-crypt.c
+++ b/crypt/sha256-crypt.c
@@ -18,6 +18,7 @@ 
 
 #include <assert.h>
 #include <errno.h>
+#include <scratch_buffer.h>
 #include <stdbool.h>
 #include <stdlib.h>
 #include <string.h>
@@ -118,6 +119,14 @@  __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
   size_t alloca_used = 0;
   char *free_key = NULL;
   char *free_pbytes = NULL;
+  struct scratch_buffer sbuf;
+  scratch_buffer_init (&sbuf);
+  struct scratch_buffer p_bytes_buf;
+  scratch_buffer_init (&p_bytes_buf);
+  struct scratch_buffer salt_buf;
+  scratch_buffer_init (&salt_buf);
+  struct scratch_buffer s_bytes_buf;
+  scratch_buffer_init (&s_bytes_buf);
 
   /* Find beginning of salt string.  The prefix should normally always
      be present.  Just in case it is not.  */
@@ -146,14 +155,11 @@  __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
     {
       char *tmp;
 
-      if (__libc_use_alloca (alloca_used + key_len + __alignof__ (uint32_t)))
-	tmp = alloca_account (key_len + __alignof__ (uint32_t), alloca_used);
-      else
-	{
-	  free_key = tmp = (char *) malloc (key_len + __alignof__ (uint32_t));
-	  if (tmp == NULL)
-	    return NULL;
-	}
+      if (!scratch_buffer_set_array_size (
+		&sbuf, 1, key_len + __alignof__ (uint32_t)))
+        return NULL;
+
+      tmp = sbuf.data;
 
       key = copied_key =
 	memcpy (tmp + __alignof__ (uint32_t)
@@ -164,8 +170,14 @@  __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
 
   if (((uintptr_t) salt) % __alignof__ (uint32_t) != 0)
     {
-      char *tmp = (char *) alloca (salt_len + __alignof__ (uint32_t));
-      alloca_used += salt_len + __alignof__ (uint32_t);
+      char *tmp;
+      if (!scratch_buffer_set_array_size (
+		&salt_buf, 1, salt_len + __alignof__ (uint32_t)))
+	{
+	  scratch_buffer_free (&sbuf);
+	  return NULL;
+	}
+      tmp = salt_buf.data;
       salt = copied_salt =
 	memcpy (tmp + __alignof__ (uint32_t)
 		- ((uintptr_t) tmp) % __alignof__ (uint32_t),
@@ -178,7 +190,8 @@  __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
   NSSLOWInitContext *nss_ictx = NSSLOW_Init ();
   if (nss_ictx == NULL)
     {
-      free (free_key);
+      scratch_buffer_free (&sbuf);
+      scratch_buffer_free (&salt_buf);
       return NULL;
     }
   NSSLOWHASHContext *nss_ctx = NULL;
@@ -243,17 +256,13 @@  __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
   sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
 
   /* Create byte sequence P.  */
-  if (__libc_use_alloca (alloca_used + key_len))
-    cp = p_bytes = (char *) alloca (key_len);
-  else
+  if (!scratch_buffer_set_array_size (&p_bytes_buf, 1, key_len))
     {
-      free_pbytes = cp = p_bytes = (char *)malloc (key_len);
-      if (free_pbytes == NULL)
-	{
-	  free (free_key);
-	  return NULL;
-	}
+      scratch_buffer_free (&sbuf);
+      scratch_buffer_free (&salt_buf);
+      return NULL;
     }
+  cp = p_bytes = p_bytes_buf.data;
 
   for (cnt = key_len; cnt >= 32; cnt -= 32)
     cp = mempcpy (cp, temp_result, 32);
@@ -270,7 +279,15 @@  __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
   sha256_finish_ctx (&alt_ctx, nss_alt_ctx, temp_result);
 
   /* Create byte sequence S.  */
-  cp = s_bytes = alloca (salt_len);
+  if (!scratch_buffer_set_array_size (&s_bytes_buf, 1, salt_len))
+    {
+      scratch_buffer_free (&sbuf);
+      scratch_buffer_free (&p_bytes_buf);
+      scratch_buffer_free (&salt_buf);
+      return NULL;
+    }
+  cp = s_bytes = s_bytes_buf.data;
+
   for (cnt = salt_len; cnt >= 32; cnt -= 32)
     cp = mempcpy (cp, temp_result, 32);
   memcpy (cp, temp_result, cnt);
@@ -381,8 +398,10 @@  __sha256_crypt_r (const char *key, const char *salt, char *buffer, int buflen)
   if (copied_salt != NULL)
     explicit_bzero (copied_salt, salt_len);
 
-  free (free_key);
-  free (free_pbytes);
+  scratch_buffer_free (&sbuf);
+  scratch_buffer_free (&p_bytes_buf);
+  scratch_buffer_free (&salt_buf);
+  scratch_buffer_free (&s_bytes_buf);
   return buffer;
 }