[v5,4/6] stdlib: Implement introsort with qsort (BZ 19305)

Message ID 20230713132540.2854320-5-adhemerval.zanella@linaro.org
State Superseded
Series Use introsort for qsort

Checks

Context Check Description
redhat-pt-bot/TryBot-apply_patch success Patch applied to master at the time it was sent
linaro-tcwg-bot/tcwg_glibc_check--master-aarch64 success Testing passed
linaro-tcwg-bot/tcwg_glibc_build--master-arm success Testing passed
linaro-tcwg-bot/tcwg_glibc_check--master-arm success Testing passed
linaro-tcwg-bot/tcwg_glibc_build--master-aarch64 success Testing passed

Commit Message

Adhemerval Zanella Netto July 13, 2023, 1:25 p.m. UTC
This patch adds an introsort implementation to qsort so that quicksort's
worst-case performance is bounded by O(n log n).  The fallback used is a
heapsort based on the Linux implementation (lib/sort.c, commit
22a241ccb2c19962a).  As a side note, the introsort implementation is
similar to the one used by libstdc++ for std::sort.

Checked on x86_64-linux-gnu.
---
 stdlib/qsort.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 86 insertions(+), 6 deletions(-)
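
As a rough illustration of the scheme described in the commit message above,
here is a self-contained sketch against a plain int array (the names
sift_down_ints, heapsort_ints, introsort_ints, and sort_ints are illustrative
only; the patch itself works on untyped byte ranges with an explicit stack and
keeps the existing Sedgewick optimizations).  Quicksort runs with a depth
budget of about 2 * floor (log2 (n)), and any subrange that exhausts the
budget is finished with heapsort, which caps the whole sort at O(n log n):

/* Illustrative introsort: depth-limited quicksort with a heapsort
   fallback, on an int array.  Not the patch code.  */
#include <stddef.h>

static void
sift_down_ints (int *a, size_t root, size_t n)
{
  for (size_t child; (child = 2 * root + 1) < n; root = child)
    {
      if (child + 1 < n && a[child] < a[child + 1])
        child++;                        /* Pick the larger child.  */
      if (a[root] >= a[child])
        break;
      int tmp = a[root]; a[root] = a[child]; a[child] = tmp;
    }
}

static void
heapsort_ints (int *a, size_t n)
{
  for (size_t i = n / 2; i-- > 0; )     /* Build a max-heap.  */
    sift_down_ints (a, i, n);
  while (n > 1)                         /* Move the current max to the end.  */
    {
      int tmp = a[0]; a[0] = a[n - 1]; a[n - 1] = tmp;
      sift_down_ints (a, 0, --n);
    }
}

static void
introsort_ints (int *a, size_t lo, size_t hi, size_t depth)
{
  while (hi - lo > 1)
    {
      if (depth == 0)
        {
          heapsort_ints (a + lo, hi - lo);      /* Budget exhausted.  */
          return;
        }
      depth--;

      /* Naive Lomuto partition around the last element.  */
      int pivot = a[hi - 1];
      size_t i = lo;
      for (size_t j = lo; j < hi - 1; j++)
        if (a[j] < pivot)
          {
            int tmp = a[i]; a[i] = a[j]; a[j] = tmp;
            i++;
          }
      int tmp = a[i]; a[i] = a[hi - 1]; a[hi - 1] = tmp;

      /* Recurse into the smaller side, iterate on the larger one, so the
         recursion depth stays O(log n).  */
      if (i - lo < hi - (i + 1))
        {
          introsort_ints (a, lo, i, depth);
          lo = i + 1;
        }
      else
        {
          introsort_ints (a, i + 1, hi, depth);
          hi = i;
        }
    }
}

static void
sort_ints (int *a, size_t n)
{
  size_t depth = 0;
  for (size_t m = n; m > 1; m >>= 1)    /* depth = 2 * floor (log2 (n)).  */
    depth += 2;
  if (n > 1)
    introsort_ints (a, 0, n, depth);
}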
  

Patch

diff --git a/stdlib/qsort.c b/stdlib/qsort.c
index 53013ef281..ed0ac53467 100644
--- a/stdlib/qsort.c
+++ b/stdlib/qsort.c
@@ -112,6 +112,7 @@  typedef struct
   {
     char *lo;
     char *hi;
+    size_t depth;
   } stack_node;
 
 /* The stack needs log (total_elements) entries (we could even subtract
@@ -121,22 +122,90 @@  typedef struct
 enum { STACK_SIZE = CHAR_BIT * sizeof (size_t) };
 
 static inline stack_node *
-push (stack_node *top, char *lo, char *hi)
+push (stack_node *top, char *lo, char *hi, size_t depth)
 {
   top->lo = lo;
   top->hi = hi;
+  top->depth = depth;
   return ++top;
 }
 
 static inline stack_node *
-pop (stack_node *top, char **lo, char **hi)
+pop (stack_node *top, char **lo, char **hi, size_t *depth)
 {
   --top;
   *lo = top->lo;
   *hi = top->hi;
+  *depth = top->depth;
   return top;
 }
 
+/* A fast, small, non-recursive O(n log n) heapsort, adapted from Linux
+   lib/sort.c.  Used by the introsort implementation as a fallback routine,
+   with worst-case time complexity of O(n log n) and worst-case space
+   complexity of O(1).  */
+
+static inline size_t
+parent (size_t i, unsigned int lsbit, size_t size)
+{
+  i -= size;
+  i -= size & -(i & lsbit); /* i.e., 'if (i & lsbit) i -= size;'  */
+  return i / 2;
+}
+
+static void
+heapsort_r (void *base, void *end, size_t size, enum swap_type_t swap_type,
+	    __compar_d_fn_t cmp, void *arg)
+{
+  size_t num = ((uintptr_t) end - (uintptr_t) base) / size;
+  size_t n = num * size, a = (num/2) * size;
+  /* Used to find parent.  */
+  const size_t lsbit = size & -size;
+
+  /* num < 2 || size == 0.  */
+  if (a == 0)
+    return;
+
+  for (;;)
+    {
+      size_t b, c, d;
+
+      if (a != 0)
+	/* Building heap: sift down --a */
+	a -= size;
+      else if (n -= size)
+	/* Sorting: Extract root to --n */
+	do_swap (base, base + n, size, swap_type);
+      else
+	break;
+
+      /* Sift element at "a" down into heap.  This is the "bottom-up" variant,
+	 which significantly reduces calls to cmp_func(): we find the
+	 sift-down path all the way to the leaves (one compare per level),
+	 then backtrack to find where to insert the target element.
+
+	 Because elements tend to sift down close to the leaves, this uses
+	 fewer compares than doing two per level on the way down.  (A bit more
+	 than half as many on average, 3/4 worst-case.)  */
+      for (b = a; c = 2 * b + size, (d = c + size) < n;)
+	b = cmp (base + c, base + d, arg) >= 0 ? c : d;
+      if (d == n)
+	/* Special case last leaf with no sibling.  */
+	b = c;
+
+      /* Now backtrack from "b" to the correct location for "a".  */
+      while (b != a && cmp (base + a, base + b, arg) >= 0)
+	b = parent (b, lsbit, size);
+      /* Where "a" belongs.  */
+      c = b;
+      while (b != a)
+	{
+	  /* Shift it into place.  */
+	  b = parent (b, lsbit, size);
+	  do_swap (base + b, base + c, size, swap_type);
+	}
+    }
+}
 
 /* Order size using quicksort.  This implementation incorporates
    four optimizations discussed in Sedgewick:
@@ -222,7 +291,7 @@  _quicksort (void *const pbase, size_t total_elems, size_t size,
 
   const size_t max_thresh = MAX_THRESH * size;
 
-  if (total_elems == 0)
+  if (total_elems <= 1)
     /* Avoid lossage with unsigned arithmetic below.  */
     return;
 
@@ -234,6 +303,10 @@  _quicksort (void *const pbase, size_t total_elems, size_t size,
   else
     swap_type = SWAP_BYTES;
 
+  /* Maximum depth before quicksort switches to heapsort.  */
+  size_t depth = 2 * (sizeof (size_t) * CHAR_BIT - 1
+		      - __builtin_clzl (total_elems));
+
   if (total_elems > MAX_THRESH)
     {
       char *lo = base_ptr;
@@ -243,6 +316,13 @@  _quicksort (void *const pbase, size_t total_elems, size_t size,
 
       while (stack < top)
         {
+          if (depth == 0)
+            {
+              heapsort_r (lo, hi, size, swap_type, cmp, arg);
+              top = pop (top, &lo, &hi, &depth);
+              continue;
+            }
+
           char *left_ptr;
           char *right_ptr;
 
@@ -306,7 +386,7 @@  _quicksort (void *const pbase, size_t total_elems, size_t size,
             {
               if ((size_t) (hi - left_ptr) <= max_thresh)
 		/* Ignore both small partitions. */
-                top = pop (top, &lo, &hi);
+                top = pop (top, &lo, &hi, &depth);
               else
 		/* Ignore small left partition. */
                 lo = left_ptr;
@@ -317,13 +397,13 @@  _quicksort (void *const pbase, size_t total_elems, size_t size,
           else if ((right_ptr - lo) > (hi - left_ptr))
             {
 	      /* Push larger left partition indices. */
-              top = push (top, lo, right_ptr);
+              top = push (top, lo, right_ptr, depth - 1);
               lo = left_ptr;
             }
           else
             {
 	      /* Push larger right partition indices. */
-              top = push (top, left_ptr, hi);
+              top = push (top, left_ptr, hi, depth - 1);
               hi = right_ptr;
             }
         }
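
A note on the parent () helper added above: the heap is addressed in raw
bytes, so the node at byte offset i (index i / size) has its parent at byte
offset ((i / size - 1) / 2) * size.  The branch-free form first subtracts
size, then subtracts size once more only when the resulting index is odd,
which it detects by testing the lowest set bit of size (lsbit) in the offset,
and finally halves.  A small hypothetical harness (parent_offset and the
driver below are not part of the patch) that checks the trick against plain
index arithmetic:

/* Hypothetical check: the branch-free byte-offset parent computation
   matches ((index - 1) / 2) * size.  */
#include <assert.h>
#include <stddef.h>

static size_t
parent_offset (size_t i, size_t lsbit, size_t size)
{
  i -= size;
  i -= size & -(i & lsbit);     /* i.e., 'if (i & lsbit) i -= size;'  */
  return i / 2;
}

int
main (void)
{
  /* Element sizes that are not powers of two exercise the lsbit test.  */
  static const size_t sizes[] = { 1, 3, 4, 12, 24, 40 };
  for (size_t s = 0; s < sizeof sizes / sizeof sizes[0]; s++)
    {
      size_t size = sizes[s];
      size_t lsbit = size & -size;      /* Lowest set bit of size.  */
      for (size_t idx = 1; idx < 4096; idx++)
        assert (parent_offset (idx * size, lsbit, size)
                == ((idx - 1) / 2) * size);
    }
  return 0;
}

The conditional subtraction works because size / lsbit is odd, so a byte
offset (index * size) has the lsbit bit set exactly when the index is odd.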
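
The "bottom-up" sift-down used by heapsort_r is also worth a short
illustration.  A classic sift-down spends two compares per level (the sifted
element against each child); the bottom-up variant first walks the
larger-child path all the way to a leaf with one compare per level, then
backtracks to find where the displaced root actually belongs, and finally
rotates the values along that path.  A minimal sketch on an int max-heap
(sift_down_bottom_up is a hypothetical name; this version rotates through a
temporary, whereas the generic patch code cannot hold an element of arbitrary
size and swaps instead):

/* Illustrative bottom-up sift-down on an int max-heap; not the patch code.  */
#include <assert.h>
#include <stddef.h>

static void
sift_down_bottom_up (int *a, size_t root, size_t n)
{
  size_t j = root;

  /* Phase 1: follow the larger child down to a leaf, one compare per level.  */
  while (2 * j + 2 < n)
    j = a[2 * j + 1] >= a[2 * j + 2] ? 2 * j + 1 : 2 * j + 2;
  if (2 * j + 1 < n)            /* The last level may have a lone left child.  */
    j = 2 * j + 1;

  /* Phase 2: back up until the displaced root value fits at j.  */
  while (j != root && a[root] >= a[j])
    j = (j - 1) / 2;

  /* Phase 3: rotate the values on the path so the old a[root] lands at j.  */
  int val = a[j];
  a[j] = a[root];
  while (j != root)
    {
      j = (j - 1) / 2;
      int tmp = a[j];
      a[j] = val;
      val = tmp;
    }
}

int
main (void)
{
  /* 10 sits at the root of an otherwise valid max-heap.  */
  int heap[] = { 10, 90, 80, 70, 60, 50, 40 };
  sift_down_bottom_up (heap, 0, 7);
  assert (heap[0] == 90 && heap[1] == 70 && heap[3] == 10);
  return 0;
}

The patch performs the same rotation with do_swap calls against the fixed
slot c, which avoids an element-sized temporary buffer.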
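
Finally, the depth budget computed in _quicksort: for a non-zero total_elems,
sizeof (size_t) * CHAR_BIT - 1 - __builtin_clzl (total_elems) is the position
of the highest set bit, i.e. floor (log2 (total_elems)), so the budget is
2 * floor (log2 (n)); for example, 1000 elements allow 2 * 9 = 18 partition
levels before heapsort takes over.  A standalone check (hypothetical, and
assuming size_t has the same width as unsigned long, which the use of
__builtin_clzl already relies on):

/* Hypothetical check of the depth budget formula used in the patch.  */
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

int
main (void)
{
  for (size_t n = 10; n <= 1000000; n *= 10)
    {
      size_t depth = 2 * (sizeof (size_t) * CHAR_BIT - 1
                          - __builtin_clzl (n));
      printf ("total_elems=%zu -> depth budget %zu\n", n, depth);
    }
  return 0;
}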