Improve bench-strstr

Message ID: DB5PR08MB10308881406BC7E59B6B9D0283F30@DB5PR08MB1030.eurprd08.prod.outlook.com
State: New, archived

Commit Message

Wilco Dijkstra Oct. 29, 2018, 3:48 p.m. UTC
  Improve bench-strstr by using an extract from the manual as the input
to make the test more realistic.  Use the same input for both found and
fail cases rather than using a memset of '0' for most of the string,
which measures performance of strchr rather than strstr.  Add result
checking to catch potential errors.  Remove the repeated tests at slightly
different alignments and add more large needle and haystack testcases.

ChangeLog:
2018-10-29  Wilco Dijkstra  <wdijkstr@arm.com>

	* benchtests/bench-strstr.c (input): Added realistic input text.
	(do_one_test): Increase iterations.  Add result checking.
	(do_test): Use new input text.  Remove accidental early matches.
	(test_main): Improve range of tests, reduce unaligned cases.
--
  

Comments

Carlos O'Donell Oct. 29, 2018, 4:03 p.m. UTC | #1
On 10/29/18 11:48 AM, Wilco Dijkstra wrote:
> Improve bench-strstr by using an extract from the manual as the input
> to make the test more realistic.  Use the same input for both found and
> fail cases rather than using a memset of '0' for most of the string,
> which measures performance of strchr rather than strstr.  Add result
> checking to catch potential errors.  Remove the repeated tests at slightly
> different alignments and add more large needle and haystack testcases.

Can you expand a bit more on the theory behind this change?

I can understand the desire to use English-language text as the input
for strstr, to ensure that the microbenchmark performance more closely
matches what might be found when parsing real English text.

I can also understand a desire for large needle and haystack test cases.

Why are we adjusting MIN_PAGE_SIZE?

Why do we remove tests at differing alignments? I presume that this
kind of alignment based test is to catch cases where the algorithm
performs poorly or expects certain alignments, or again performs poorly
when crossing page boundaries. Why would we remove it from this specific
test?

Any help you can provide in further documenting this kind of decision
making in the test with comments would be very helpful.


  
Wilco Dijkstra Oct. 29, 2018, 6:07 p.m. UTC | #2
Hi Carlos,

>On 10/29/18 11:48 AM, Wilco Dijkstra wrote:
>> Improve bench-strstr by using an extract from the manual as the input
>> to make the test more realistic.  Use the same input for both found and
>> fail cases rather than using a memset of '0' for most of the string,
>> which measures performance of strchr rather than strstr.  Add result
>> checking to catch potential errors.  Remove the repeated tests at slightly
>> different alignments and add more large needle and haystack testcases.
>
> Can you expand a bit more on the theory behind this change?
>
> I can understand the desire to use English-language text as the input
> for strstr, to ensure that the microbenchmark performance more closely
> matches what might be found when parsing real English text.

Yes, the idea is to use character occurrence frequencies that are similar to 
actual uses rather than some synthetic string with a highly improbable
distribution (which also makes half the benchmark tests useless).
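
A minimal sketch (hypothetical, not part of the patch) makes this concrete:
count the positions where the first byte of the needle occurs in the haystack,
since that decides how often the full matching logic actually runs.  With the
old memset-style haystack a needle such as "the" has no candidate positions at
all before the copy appended at the end, so the timing degenerates into a
first-character scan, i.e. strchr:

#include <stdio.h>
#include <string.h>

/* Count positions where the first needle byte occurs in the haystack;
   each one forces a strstr implementation into its full matching logic.  */
static size_t
candidates (const char *hay, char first)
{
  size_t n = 0;
  for (; *hay != '\0'; hay++)
    if (*hay == first)
      n++;
  return n;
}

int
main (void)
{
  /* Old-style haystack: a block of '0' bytes as produced by memset.  */
  char synthetic[257];
  memset (synthetic, '0', 256);
  synthetic[256] = '\0';

  /* New-style haystack: ordinary English text.  */
  const char *english =
    "This manual is written with the assumption that you are at least "
    "somewhat familiar with the C programming language.";

  printf ("synthetic: %zu candidates for 't'\n", candidates (synthetic, 't'));
  printf ("english:   %zu candidates for 't'\n", candidates (english, 't'));
  return 0;
}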

> I can also understand a desire for large needle and haystack test cases.

Yes, and we actually use the long needle code.

> Why are we adjusting MIN_PAGE_SIZE?

This is needed if you want the buffers to be a bit larger. It seems to reserve
only 2 pages by default, but that could mean just two 512-byte pages rather
than the known fixed amount that many of the benchtests require.

> Why do we remove tests at differing alignments? I presume that this
> kind of alignment based test is to catch cases where the algorithm
> performs poorly or expects certain alignments, or again performs poorly
> when crossing page boundaries. Why would we remove it from this specific
> test?

Unlike block-based functions like memcpy, strstr is byte-oriented, so there is
no alignment sensitivity. Obviously it calls some other string functions, but
those alignments are effectively random and unrelated to the initial alignment.

In general alignment is way overrepresented: traces show strings are often
aligned (far more often than you'd expect, due to globals, alloca and malloc
overaligning), and even if there is alignment sensitivity, the exact alignment
doesn't matter at all (beyond whether it is aligned). So all the combinations
of alignments are wasted effort and just clutter the results.

Wilco
  
Carlos O'Donell Oct. 29, 2018, 11:44 p.m. UTC | #3
On 10/29/18 2:07 PM, Wilco Dijkstra wrote:
> Hi Carlos,
> 
>> On 10/29/18 11:48 AM, Wilco Dijkstra wrote:
>>> Improve bench-strstr by using an extract from the manual as the input
>>> to make the test more realistic.  Use the same input for both found and
>>> fail cases rather than using a memset of '0' for most of the string,
>>> which measures performance of strchr rather than strstr.  Add result
>>> checking to catch potential errors.  Remove the repeated tests at slightly
>>> different alignments and add more large needle and haystack testcases.
>>
>> Can you expand a bit more on the theory behind this change?
>>
>> I can understand the desire to use English-language text as the input
>> for strstr, to ensure that the microbenchmark performance more closely
>> matches what might be found when parsing real English text.
> 
> Yes, the idea is to use character occurrence frequencies that are similar to 
> actual uses rather than some synthetic string with a highly improbable
> distribution (which also makes half the benchmark tests useless).

OK.

>> I can also understand a desire for large needle and haystack test cases.
> 
> Yes, and we actually use the long needle code.

OK.

>> Why are we adjusting MIN_PAGE_SIZE?
> 
> This is needed if you want the buffers to be a bit larger. It seems to reserve
> only 2 pages by default, but that could mean just two 512-byte pages rather
> than the known fixed amount that many of the benchtests require.

Could you expand on this a bit more? Why are we using something called
MIN_PAGE_SIZE to do something entirely different?

>> Why do we remove tests at differing alignments? I presume that this
>> kind of alignment based test is to catch cases where the algorithm
>> performs poorly or expects certain alignments, or again performs poorly
>> when crossing page boundaries. Why would we remove it from this specific
>> test?
> 
> Unlike block-based functions like memcpy, strstr is byte-oriented, so there is
> no alignment sensitivity. Obviously it calls some other string functions, but
> those alignments are effectively random and unrelated to the initial alignment.

But strstr is only as byte-oriented as the algorithm implementing it, surely?

If I were to vectorize strstr to use AVX2 or AVX-512, alignment would make
a difference, notably because you'd have unrolled loops that run at the start
until the input reaches a suitable alignment, so that vector operations can do
needle searches in parallel?

Why isn't alignment relevant in this case?

> In general alignment is way overrepresented: traces show strings are often
> aligned (far more often than you'd expect, due to globals, alloca and malloc
> overaligning), and even if there is alignment sensitivity, the exact alignment
> doesn't matter at all (beyond whether it is aligned). So all the combinations
> of alignments are wasted effort and just clutter the results.

Ah, this is a much more cogent argument against alignment being measured, but
what we need here is data and comments around this particular issue. How did
you measure this, what were your results, and can we put them into a comment
in the sources?

My personal opinion is that we don't actually have the data to back up such
claims, so we continue to *look* at performance over alignment as just a
double check, but I agree that we should *not* weigh all of the tests equally.
  
Wilco Dijkstra Oct. 30, 2018, 3:17 p.m. UTC | #4
Hi Carlos,

>>> Why are we adjusting MIN_PAGE_SIZE?
>> 
>> This is needed if you want the buffers to be a bit larger. It seems to reserve
>> only 2 pages by default, but that could mean just two 512-byte pages rather
>> than the known fixed amount that many of the benchtests require.
>
> Could you expand on this a bit more? Why are we using something called
> MIN_PAGE_SIZE to do something entirely different?

No idea; I didn't write the benchmark infrastructure. Maybe the original idea
was to test strings close to page boundaries, but I have not seen any string
test that actually does that.

What is important is that each test has a minimum amount of workspace it
can use, and currently that is not guaranteed unless you do what various
tests do and set MIN_PAGE_SIZE explicitly.
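
For reference, the relevant setup in bench-string.h looks roughly like this
(paraphrased from memory, so treat it as a sketch rather than a verbatim copy
of the header):

#include <sys/mman.h>
#include <unistd.h>

#define MIN_PAGE_SIZE 131072	/* What the patch defines before including it.  */
#define BUF1PAGES 1

static size_t page_size;
static unsigned char *buf1;

static void
test_init (void)
{
  page_size = 2 * getpagesize ();
#ifdef MIN_PAGE_SIZE
  if (page_size < MIN_PAGE_SIZE)
    page_size = MIN_PAGE_SIZE;	/* Guarantee a fixed minimum workspace.  */
#endif
  buf1 = mmap (0, (BUF1PAGES + 1) * page_size, PROT_READ | PROT_WRITE,
	       MAP_PRIVATE | MAP_ANON, -1, 0);
}

With a 4K system page the default already gives 8K of workspace, but since
nothing guarantees the system page size, defining MIN_PAGE_SIZE is the only
way to make sure the larger haystacks fit.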

> If I were to vectorize strstr to use AVX2 or AVX-512, alignment would make
> a difference, notably because you'd have unrolled loops that run at the start
> until the input reaches a suitable alignment, so that vector operations can do
> needle searches in parallel?
>
> Why isn't alignment relevant in this case?

Vector instructions typically support unaligned accesses, so alignment doesn't
matter. Even if you explicitly align the search loop, that's a one-off cost,
quite unlike memcpy where you are forced to do unaligned accesses for the
complete copy.
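
As an illustration (a hypothetical fragment, not taken from any real
implementation), the prologue of such a search loop touches at most 15 bytes
no matter where the string starts:

#include <stdint.h>

/* Advance P to a 16-byte boundary one byte at a time.  This head runs at
   most 15 times regardless of the starting alignment; after it, the main
   loop behaves identically for every input, unlike memcpy where the
   relative alignment affects every access during the copy.  */
static const char *
align_up_16 (const char *p)
{
  while (((uintptr_t) p & 15) != 0 && *p != '\0')
    p++;
  return p;	/* Aligned 16-byte loads are possible from here on.  */
}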

Benchmarking the existing x64 implementation (which uses SSE2) shows
there is no measurable performance difference at any alignment.

>> In general alignment is way overrepresented: traces show strings are often
>> aligned (far more often than you'd expect, due to globals, alloca and malloc
>> overaligning), and even if there is alignment sensitivity, the exact alignment
>> doesn't matter at all (beyond whether it is aligned). So all the combinations
>> of alignments are wasted effort and just clutter the results.
>
> Ah, this is a much more cogent argument against alignment being measured, but
> what we need here is data and comments around this particular issue. How did
> you measure this, what were your results, and can we put them into a comment
> in the sources?

I don't see the issue. My version still measures unaligned cases. However,
measuring all possible alignment combinations makes no sense because they
can't be any different: you're either aligned or you're unaligned.

> My personal opinion is that we don't actually have the data to back up such
> claims, so we continue to *look* at performance over alignment as just a
> double check, but I agree that we should *not* weigh all of the tests equally.

We do have the data: unlike memcpy, there is no alignment sensitivity in strstr.

Wilco
  

Patch

diff --git a/benchtests/bench-strstr.c b/benchtests/bench-strstr.c
index a31294e3c96d80a4fd61bb5b423a825fe54d3227..00cb779ab39ad01614896458bdcc7ff0d012c618 100644
--- a/benchtests/bench-strstr.c
+++ b/benchtests/bench-strstr.c
@@ -16,6 +16,7 @@ 
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
+#define MIN_PAGE_SIZE 131072
 #define TEST_MAIN
 #define TEST_NAME "strstr"
 #include "bench-string.h"
@@ -26,6 +27,30 @@ 
 #define __strnlen strnlen
 #include "../string/strstr.c"
 
+static const char input[] =
+"This manual is written with the assumption that you are at least "
+"somewhat familiar with the C programming language and basic programming "
+"concepts.  Specifically, familiarity with ISO standard C (*note ISO "
+"C::), rather than “traditional” pre-ISO C dialects, is assumed.\n"
+
+"   The GNU C Library includes several “header files”, each of which "
+"provides definitions and declarations for a group of related facilities; "
+"this information is used by the C compiler when processing your program. "
+"For example, the header file ‘stdio.h’ declares facilities for "
+"performing input and output, and the header file ‘string.h’ declares "
+"string processing utilities.  The organization of this manual generally "
+"follows the same division as the header files.\n"
+
+"   If you are reading this manual for the first time, you should read "
+"all of the introductory material and skim the remaining chapters.  There "
+"are a _lot_ of functions in the GNU C Library and it’s not realistic to "
+"expect that you will be able to remember exactly _how_ to use each and "
+"every one of them.  It’s more important to become generally familiar "
+"with the kinds of facilities that the library provides, so that when you "
+"are writing your programs you can recognize _when_ to make use of "
+"library functions, and _where_ in this manual you can find more specific "
+"information about them.\n";
+
 
 static char *
 stupid_strstr (const char *s1, const char *s2)
@@ -60,19 +85,25 @@  IMPL (strstr, 1)
 static void
 do_one_test (impl_t *impl, const char *s1, const char *s2, char *exp_result)
 {
-  size_t i, iters = INNER_LOOP_ITERS;
+  size_t i, iters = INNER_LOOP_ITERS * 2;
   timing_t start, stop, cur;
+  char *res;
 
   TIMING_NOW (start);
   for (i = 0; i < iters; ++i)
-    {
-      CALL (impl, s1, s2);
-    }
+    res = CALL (impl, s1, s2);
   TIMING_NOW (stop);
 
   TIMING_DIFF (cur, start, stop);
 
   TIMING_PRINT_MEAN ((double) cur, (double) iters);
+
+  if (res != exp_result)
+    {
+      error (0, 0, "Wrong result in function %s %s %s", impl->name,
+	     res, exp_result);
+      ret = 1;
+    }
 }
 
 
@@ -83,36 +114,42 @@  do_test (size_t align1, size_t align2, size_t len1, size_t len2,
   char *s1 = (char *) (buf1 + align1);
   char *s2 = (char *) (buf2 + align2);
 
-  static const char d[] = "1234567890abcdef";
-#define dl (sizeof (d) - 1)
-  char *ss2 = s2;
-  for (size_t l = len2; l > 0; l = l > dl ? l - dl : 0)
-    {
-      size_t t = l > dl ? dl : l;
-      ss2 = mempcpy (ss2, d, t);
-    }
-  s2[len2] = '\0';
+  size_t size = sizeof (input) - 1;
+  size_t pos = (len1 + len2) % size;
 
-  if (fail)
+  char *ss2 = s2;
+  for (size_t l = len2; l > 0; l = l > size ? l - size : 0)
     {
-      char *ss1 = s1;
-      for (size_t l = len1; l > 0; l = l > dl ? l - dl : 0)
+      size_t t = l > size ? size : l;
+      if (pos + t <= size)
+	ss2 = mempcpy (ss2, input + pos, t);
+      else
 	{
-	  size_t t = l > dl ? dl : l;
-	  memcpy (ss1, d, t);
-	  ++ss1[len2 > 7 ? 7 : len2 - 1];
-	  ss1 += t;
+	  ss2 = mempcpy (ss2, input + pos, size - pos);
+	  ss2 = mempcpy (ss2, input, t - (size - pos));
 	}
     }
-  else
+  s2[len2] = '\0';
+
+  char *ss1 = s1;
+  for (size_t l = len1; l > 0; l = l > size ? l - size : 0)
     {
-      memset (s1, '0', len1);
-      memcpy (s1 + len1 - len2, s2, len2);
+      size_t t = l > size ? size : l;
+      memcpy (ss1, input, t);
+      ss1 += t;
     }
+
+  if (!fail)
+    memcpy (s1 + len1 - len2, s2, len2);
   s1[len1] = '\0';
 
-  printf ("Length %4zd/%zd, alignment %2zd/%2zd, %s:",
-	  len1, len2, align1, align2, fail ? "fail" : "found");
+  /* Remove any accidental matches except for the last if !fail.  */
+  for (ss1 = stupid_strstr (s1, s2); ss1; ss1 = stupid_strstr (ss1 + 1, s2))
+    if (fail || ss1 != s1 + len1 - len2)
+      ++ss1[len2 / 2];
+
+  printf ("Length %4zd/%3zd, alignment %2zd/%2zd, %s:",
+	  len1, len2, align1, align2, fail ? "fail " : "found");
 
   FOR_EACH_IMPL (impl, 0)
     do_one_test (impl, s1, s2, fail ? NULL : s1 + len1 - len2);
@@ -130,48 +167,19 @@  test_main (void)
     printf ("\t%s", impl->name);
   putchar ('\n');
 
-  for (size_t klen = 2; klen < 32; ++klen)
-    for (size_t hlen = 2 * klen; hlen < 16 * klen; hlen += klen)
+  for (size_t hlen = 64; hlen <= 256; hlen += 32)
+    for (size_t klen = 1; klen <= 16; klen++)
       {
-	do_test (0, 0, hlen, klen, 0);
-	do_test (0, 0, hlen, klen, 1);
-	do_test (0, 3, hlen, klen, 0);
-	do_test (0, 3, hlen, klen, 1);
-	do_test (0, 9, hlen, klen, 0);
+	do_test (1, 3, hlen, klen, 0);
 	do_test (0, 9, hlen, klen, 1);
-	do_test (0, 15, hlen, klen, 0);
-	do_test (0, 15, hlen, klen, 1);
-
-	do_test (3, 0, hlen, klen, 0);
-	do_test (3, 0, hlen, klen, 1);
-	do_test (3, 3, hlen, klen, 0);
-	do_test (3, 3, hlen, klen, 1);
-	do_test (3, 9, hlen, klen, 0);
-	do_test (3, 9, hlen, klen, 1);
-	do_test (3, 15, hlen, klen, 0);
-	do_test (3, 15, hlen, klen, 1);
-
-	do_test (9, 0, hlen, klen, 0);
-	do_test (9, 0, hlen, klen, 1);
-	do_test (9, 3, hlen, klen, 0);
-	do_test (9, 3, hlen, klen, 1);
-	do_test (9, 9, hlen, klen, 0);
-	do_test (9, 9, hlen, klen, 1);
-	do_test (9, 15, hlen, klen, 0);
-	do_test (9, 15, hlen, klen, 1);
-
-	do_test (15, 0, hlen, klen, 0);
-	do_test (15, 0, hlen, klen, 1);
-	do_test (15, 3, hlen, klen, 0);
-	do_test (15, 3, hlen, klen, 1);
-	do_test (15, 9, hlen, klen, 0);
-	do_test (15, 9, hlen, klen, 1);
-	do_test (15, 15, hlen, klen, 0);
-	do_test (15, 15, hlen, klen, 1);
       }
 
-  do_test (0, 0, page_size - 1, 16, 0);
-  do_test (0, 0, page_size - 1, 16, 1);
+  for (size_t hlen = 256; hlen <= 65536; hlen *= 2)
+    for (size_t klen = 16; klen <= 256; klen *= 2)
+      {
+	do_test (1, 11, hlen, klen, 0);
+	do_test (14, 5, hlen, klen, 1);
+      }
 
   return ret;
 }