glibc 2.21 - Machine maintainers, please test your machines.

Message ID CAMe9rOo7AGUs=Y+4O4GRNPPQbEMyAuteJng2vsJ0JkCjcig+Xg@mail.gmail.com
State Not applicable

Commit Message

H.J. Lu Jan. 23, 2015, 10:34 p.m. UTC
  On Fri, Jan 23, 2015 at 2:13 PM, H.J. Lu <hjl.tools@gmail.com> wrote:
> On Fri, Jan 23, 2015 at 2:10 PM, Carlos O'Donell <carlos@redhat.com> wrote:
>> On 01/23/2015 05:03 PM, H.J. Lu wrote:
>>> On Fri, Jan 23, 2015 at 1:32 PM, Carlos O'Donell <carlos@redhat.com> wrote:
>>>> Dear Machine Maintainers,
>>>>
>>>> Please start testing your machines against glibc
>>>> master.
>>>>
>>>> Please update the glibc 2.21 release page with your
>>>> testing results:
>>>>
>>>> https://sourceware.org/glibc/wiki/Release/2.21
>>>>
>>>> If nobody objects I want to cut the release as soon
>>>
>>> x32 failed to build:
>>>
>>> https://sourceware.org/bugzilla/show_bug.cgi?id=17870
>>
>> Thanks, I'll look into this immediately.
>>
>> Cheers,
>> Carlos.
>>
>>
>
> I am testing this and will check it in after testing it on
> x32, x86-64 and ia32.
>
>

This patch uses uint64_t and (uint64_t) 1 instead of
unsigned long int and 1UL.  On x32, unsigned long int is
only 32 bits wide, but the new semaphore keeps its value
and waiter count in a single 64-bit word on every target
with 64-bit atomics, so the field and the shifted constants
must use a 64-bit type.  I am re-running the tests on x32,
x86-64 and ia32 and will check the patch in if there are
no regressions.
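
To illustrate why 1UL was wrong, here is a minimal
standalone sketch (not part of the patch; the
SEM_NWAITERS_SHIFT value is copied from
sysdeps/nptl/internaltypes.h) showing the shift that the
cast makes well-defined:

#include <inttypes.h>
#include <stdio.h>

/* From sysdeps/nptl/internaltypes.h: the waiter count lives
   in the high 32 bits of the semaphore data word.  */
#define SEM_NWAITERS_SHIFT 32

int
main (void)
{
  /* Where unsigned long int is 32 bits, as on x32 and ia32,
     1UL << SEM_NWAITERS_SHIFT shifts by the full width of
     the type, which is undefined behavior; GCC diagnoses it
     as "left shift count >= width of type".  Casting the 1
     to uint64_t first makes the shift well-defined on all
     targets and yields a 64-bit value with bit 32 set.  */
  uint64_t waiter = (uint64_t) 1 << SEM_NWAITERS_SHIFT;
  printf ("waiter increment: %" PRIu64 "\n", waiter);
  return 0;
}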
  

Patch

diff --git a/ChangeLog b/ChangeLog
index 051a7c4..a59266f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@ 
+2015-01-23  H.J. Lu  <hongjiu.lu@intel.com>
+
+	[BZ #17870]
+	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
+	with uint64_t.
+	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
+	(uint64_t) 1.
+	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
+	Replace 1UL with (uint64_t) 1.
+	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
+	int with uint64_t.
+
 2015-01-23  Roland McGrath  <roland@hack.frob.com>
 
 	* inet/if_index.c (if_nameindex): Add missing libc_hidden_weak.
diff --git a/NEWS b/NEWS
index fd6da90..0ce4352 100644
--- a/NEWS
+++ b/NEWS
@@ -18,7 +18,7 @@  Version 2.21
   17664, 17665, 17668, 17682, 17702, 17717, 17719, 17722, 17723, 17724,
   17725, 17732, 17733, 17744, 17745, 17746, 17747, 17748, 17775, 17777,
   17780, 17781, 17782, 17791, 17793, 17796, 17797, 17803, 17806, 17834,
-  17844, 17848
+  17844, 17848, 17870
 
 * A new semaphore algorithm has been implemented in generic C code for all
   machines. Previous custom assembly implementations of semaphore were
diff --git a/nptl/sem_post.c b/nptl/sem_post.c
index 9162e4c..6e495ed 100644
--- a/nptl/sem_post.c
+++ b/nptl/sem_post.c
@@ -65,7 +65,7 @@  __new_sem_post (sem_t *sem)
      added tokens before (the release sequence includes atomic RMW operations
      by other threads).  */
   /* TODO Use atomic_fetch_add to make it scale better than a CAS loop?  */
-  unsigned long int d = atomic_load_relaxed (&isem->data);
+  uint64_t d = atomic_load_relaxed (&isem->data);
   do
     {
       if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
index 96848d7..c60daa3 100644
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -187,7 +187,7 @@  __sem_wait_cleanup (void *arg)
 
 #if __HAVE_64B_ATOMICS
   /* Stop being registered as a waiter.  See below for MO.  */
-  atomic_fetch_add_relaxed (&sem->data, -(1UL << SEM_NWAITERS_SHIFT));
+  atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 #else
   __sem_wait_32_finish (sem);
 #endif
@@ -263,8 +263,8 @@  __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 #if __HAVE_64B_ATOMICS
   /* Add a waiter.  Relaxed MO is sufficient because we can rely on the
      ordering provided by the RMW operations we use.  */
-  unsigned long d = atomic_fetch_add_relaxed (&sem->data,
-      1UL << SEM_NWAITERS_SHIFT);
+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+      (uint64_t) 1 << SEM_NWAITERS_SHIFT);
 
   pthread_cleanup_push (__sem_wait_cleanup, sem);
 
@@ -304,7 +304,7 @@  __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 	      err = -1;
 	      /* Stop being registered as a waiter.  */
 	      atomic_fetch_add_relaxed (&sem->data,
-		  -(1UL << SEM_NWAITERS_SHIFT));
+		  -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 	      break;
 	    }
 	  /* Relaxed MO is sufficient; see below.  */
@@ -320,7 +320,7 @@  __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 	     up-to-date value; the futex_wait or the CAS perform the real
 	     work.  */
 	  if (atomic_compare_exchange_weak_acquire (&sem->data,
-	      &d, d - 1 - (1UL << SEM_NWAITERS_SHIFT)))
+	      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
 	    {
 	      err = 0;
 	      break;
diff --git a/sysdeps/nptl/internaltypes.h b/sysdeps/nptl/internaltypes.h
index 7c0d240..8f5cfa4 100644
--- a/sysdeps/nptl/internaltypes.h
+++ b/sysdeps/nptl/internaltypes.h
@@ -155,7 +155,7 @@  struct new_sem
 # endif
 # define SEM_NWAITERS_SHIFT 32
 # define SEM_VALUE_MASK (~(unsigned int)0)
-  unsigned long int data;
+  uint64_t data;
   int private;
   int pad;
 #else
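
For reference, the data field this patch retypes packs both
counters into one 64-bit word wherever __HAVE_64B_ATOMICS
holds.  A minimal standalone sketch of that layout (constants
copied from the header above; the counts are made up for
illustration):

#include <inttypes.h>
#include <stdio.h>

#define SEM_NWAITERS_SHIFT 32
#define SEM_VALUE_MASK (~(unsigned int) 0)

int
main (void)
{
  /* The semaphore value occupies the low 32 bits and the
     waiter count the high 32 bits: here 3 waiters and a
     value of 7.  */
  uint64_t data = ((uint64_t) 3 << SEM_NWAITERS_SHIFT) | 7;
  printf ("value:   %u\n", (unsigned int) (data & SEM_VALUE_MASK));
  printf ("waiters: %" PRIu64 "\n", data >> SEM_NWAITERS_SHIFT);
  return 0;
}

This is why unsigned long int happened to work on x86-64,
where long is 64 bits, but broke the x32 build, where long
is 32 bits even though 64-bit atomics are available.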