From patchwork Tue Dec 11 22:46:58 2018
X-Patchwork-Submitter: leonardo.sandoval.gonzalez@linux.intel.com
X-Patchwork-Id: 30637
From: leonardo.sandoval.gonzalez@linux.intel.com
To: libc-alpha@sourceware.org
Cc: Leonardo Sandoval
Subject: [PATCH v2 2/3] benchtests: include --stats parameter
Date: Tue, 11 Dec 2018 16:46:58 -0600
Message-Id: <20181211224659.29876-3-leonardo.sandoval.gonzalez@linux.intel.com>
In-Reply-To: <20181211224659.29876-1-leonardo.sandoval.gonzalez@linux.intel.com>
References: <20181211224659.29876-1-leonardo.sandoval.gonzalez@linux.intel.com>

From: Leonardo Sandoval

Allow the user to pick the statistics to compare, defaulting to min and
mean, from the command line. If a requested statistic does not exist in
the benchmark data, catch the resulting KeyError and keep comparing the
rest of the benchmarked functions. Likewise, catch division-by-zero
errors and continue with the remaining functions, making the script more
fault tolerant and thus more useful.

	* benchtests/scripts/compare_bench.py (do_compare): Catch KeyError
	and ZeroDivisionError exceptions.
	* benchtests/scripts/compare_bench.py (compare_runs): Use stats
	argument to loop through user provided statistics.
	* benchtests/scripts/compare_bench.py (main): Include the --stats
	argument.
---
 benchtests/scripts/compare_bench.py | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/benchtests/scripts/compare_bench.py b/benchtests/scripts/compare_bench.py
index 9cbbda6be6..f0c9bf7a7d 100755
--- a/benchtests/scripts/compare_bench.py
+++ b/benchtests/scripts/compare_bench.py
@@ -42,17 +42,25 @@ def do_compare(func, var, tl1, tl2, par, threshold):
         threshold: The threshold for differences, beyond which the script
         should print a warning.
     """
-    d = abs(tl2[par] - tl1[par]) * 100 / tl1[str(par)]
+    try:
+        v1 = tl1[str(par)]
+        v2 = tl2[str(par)]
+        d = abs(v2 - v1) * 100 / v1
+    except KeyError:
+        return
+    except ZeroDivisionError:
+        return
+
     if d > threshold:
-        if tl1[par] > tl2[par]:
+        if v1 > v2:
             ind = '+++'
         else:
             ind = '---'
         print('%s %s(%s)[%s]: (%.2lf%%) from %g to %g' %
-              (ind, func, var, par, d, tl1[par], tl2[par]))
+              (ind, func, var, par, d, v1, v2))
 
 
-def compare_runs(pts1, pts2, threshold):
+def compare_runs(pts1, pts2, threshold, stats):
     """Compare two benchmark runs
 
     Args:
@@ -70,8 +78,8 @@ def compare_runs(pts1, pts2, threshold):
 
             # Compare the consolidated numbers
             # do_compare(func, var, tl1, tl2, 'max', threshold)
-            do_compare(func, var, tl1, tl2, 'min', threshold)
-            do_compare(func, var, tl1, tl2, 'mean', threshold)
+            for stat in stats.split():
+                do_compare(func, var, tl1, tl2, stat, threshold)
 
             # Skip over to the next variant or function if there is no detailed
             # timing info for the function variant.
@@ -152,7 +160,7 @@ def plot_graphs(bench1, bench2):
         print('Writing out %s' % filename)
         pylab.savefig(filename)
 
 
-def main(bench1, bench2, schema, threshold):
+def main(bench1, bench2, schema, threshold, stats):
     bench1 = bench.parse_bench(bench1, schema)
     bench2 = bench.parse_bench(bench2, schema)
 
@@ -161,7 +169,7 @@ def main(bench1, bench2, schema, threshold):
     bench.compress_timings(bench1)
     bench.compress_timings(bench2)
 
-    compare_runs(bench1, bench2, threshold)
+    compare_runs(bench1, bench2, threshold, stats)
 
 
 if __name__ == '__main__':
@@ -176,7 +184,8 @@ if __name__ == '__main__':
                         default=os.path.join(os.path.dirname(os.path.realpath(__file__)),'benchout.schema.json'),
                         help='JSON file to validate source/dest files (default: %(default)s)')
     parser.add_argument('--threshold', default=10.0, type=float,
                         help='Only print those with equal or higher threshold (default: %(default)s)')
+    parser.add_argument('--stats', default='min mean', type=str, help='Only consider values from the statistics specified as a space separated list (default: %(default)s)')
 
     args = parser.parse_args()
-    main(args.bench1, args.bench2, args.schema, args.threshold)
+    main(args.bench1, args.bench2, args.schema, args.threshold, args.stats)
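
As a usage sketch, the new option would be exercised roughly as follows;
the JSON file names here are placeholders for two benchmark result
files:

    $ benchtests/scripts/compare_bench.py bench-old.json bench-new.json \
          --stats 'min mean max'

Each name in the space separated list is handed to do_compare() in turn,
so a statistic absent from the input no longer aborts the whole
comparison.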
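
The fault tolerance in do_compare() can be illustrated with made-up
timing dictionaries (the values and function names below are purely
hypothetical):

    # 'median' is missing from both runs, and the first run's 'min'
    # aggregate is zero, so the percentage difference is undefined.
    tl1 = {'min': 0.0, 'mean': 5.0}
    tl2 = {'min': 1.0, 'mean': 6.0}
    do_compare('memcpy', 'default', tl1, tl2, 'median', 10.0)  # KeyError caught, skipped
    do_compare('memcpy', 'default', tl1, tl2, 'min', 10.0)     # ZeroDivisionError caught, skipped
    do_compare('memcpy', 'default', tl1, tl2, 'mean', 10.0)    # 20% > 10%, prints a '---' line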