From e24bbce044168517b807d3a66f68ba303a79fd81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Pedro=20Bol=C3=ADvar=20Puente?= Date: Fri, 21 Oct 2016 21:03:59 +0200 Subject: [PATCH 1/6] Add mechanism to skip benchmarks The idea is that in some cases, we might not want to run a benchmark. The typical example is benchmarking various algorithms with increasing problem sizes (using *parameters*). For high values of `N`, we might want to skip those algorithms with poor complexity, but still compare the other algorithms. This is highlighted in the new `example8`, where various sorting algorithms are tested. When running it like this: ``` bin/examples/example8 -p N:*:1:2:20 -r html ``` the following output is obtained: https://sinusoid.es/misc/nonius/skip-example8.html The way this has been implemented includes the following changes: - Benchmarks continue running after one benchmark throws an exception. This means that one can skip the benchmark just by throwing an exception. Also, this means that the system is more resilient now and it outputs all other available results even if a benchmark actually fails because of an error condition. - A new kind of exception `skip_error` has been added to explicitly signal that we would like to skip this benchmark. This exception may be thrown with `nonius::skip()` for convenience. - The HTML reporter now takes into account that some benchmark runs may be missing results and generates sensible output in that case. 
--- examples/example8.c++ | 98 ++++++++++++++++++++++ include/nonius/go.h++ | 46 +++++----- include/nonius/reporters/html_reporter.h++ | 15 ++-- tpl/report.tpl.js | 10 ++- 4 files changed, 136 insertions(+), 33 deletions(-) create mode 100644 examples/example8.c++ diff --git a/examples/example8.c++ b/examples/example8.c++ new file mode 100644 index 0000000..ca4cf1c --- /dev/null +++ b/examples/example8.c++ @@ -0,0 +1,98 @@ +#define NONIUS_RUNNER +#include + +#include +#include +#include +#include +#include + +NONIUS_PARAM(N, std::size_t{100}) +NONIUS_PARAM(QUADRATIC_MAX_N, std::size_t{10000}) + +template +std::vector make_random_vector(std::size_t n) +{ + auto v = std::vector(n); + std::iota(v.begin(), v.end(), IntT{}); + std::shuffle(v.begin(), v.end(), std::default_random_engine{42}); + return v; +} + +template +void bubble_sort(Iter first, Iter last) +{ + using std::swap; + auto sorted = false; + if (first == last) return; + do { + sorted = true; + for (auto it = first, nxt = next(it); + nxt != last; + it = nxt, nxt = next(it)) + { + if (*it > *nxt) { + swap(*it, *nxt); + sorted = false; + } + } + } while (!sorted); +} + +template +void insert_sort(Iter first, Iter last) +{ + using std::swap; + for (auto it = first; it != last; ++it) + swap(*it, *std::min_element(it, last)); +} + +NONIUS_BENCHMARK("std::sort", [](nonius::chronometer meter) +{ + auto n = meter.param(); + auto v = make_random_vector(n); + auto vs = std::vector(meter.runs(), v); + meter.measure([&] (int r) { + std::sort(vs[r].begin(), vs[r].end()); + }); +}) + +NONIUS_BENCHMARK("std::qsort", [](nonius::chronometer meter) +{ + auto n = meter.param(); + auto v = make_random_vector(n); + auto vs = std::vector(meter.runs(), v); + meter.measure([&] (int r) { + std::qsort( + &vs[r][0], vs[r].size(), sizeof(int), + [] (const void* a, const void* b) { + return *static_cast(a) - *static_cast(b); + }); + }); +}) + +NONIUS_BENCHMARK("bubble_sort", [](nonius::chronometer meter) +{ + auto n = meter.param(); + 
if (n >= meter.param()) + nonius::skip(); + auto v = make_random_vector(n); + auto vs = std::vector(meter.runs(), v); + meter.measure([&] (int r) { + bubble_sort(vs[r].begin(), vs[r].end()); + }); +}) + +NONIUS_BENCHMARK("insert_sort", [](nonius::parameters params) +{ + auto n = params.get(); + if (n >= params.get()) + nonius::skip(); + return [=](nonius::chronometer meter) { + auto v = make_random_vector(n); + auto vs = std::vector(meter.runs(), v); + meter.measure([&] (int r) { + insert_sort(vs[r].begin(), vs[r].end()); + }); + }; +}) diff --git a/include/nonius/go.h++ b/include/nonius/go.h++ index b4994c1..20056e0 100644 --- a/include/nonius/go.h++ +++ b/include/nonius/go.h++ @@ -57,14 +57,14 @@ namespace nonius { } }; - template - detail::CompleteType> user_code(reporter& rep, Fun&& fun) { - try { - return detail::complete_invoke(std::forward(fun)); - } catch(...) { - rep.benchmark_failure(std::current_exception()); - throw benchmark_user_error(); + struct skip_error : virtual std::exception { + const char* what() const NONIUS_NOEXCEPT override { + return "benchmark was skipped"; } + }; + + inline void skip() { + throw skip_error{}; } inline std::vector generate_params(param_configuration cfg) { @@ -117,24 +117,22 @@ namespace nonius { rep.params_start(params); for (auto&& bench : benchmarks) { rep.benchmark_start(bench.name); - - auto plan = user_code(rep, [&]{ - return bench.template prepare(cfg, params, env); - }); - - rep.measurement_start(plan); - auto samples = user_code(rep, [&]{ - return plan.template run(cfg, env); - }); - rep.measurement_complete(std::vector(samples.begin(), samples.end())); - - if(!cfg.no_analysis) { - rep.analysis_start(); - auto analysis = detail::analyse(cfg, env, samples.begin(), samples.end()); - rep.analysis_complete(analysis); + try { + auto plan = bench.template prepare(cfg, params, env); + + rep.measurement_start(plan); + auto samples = plan.template run(cfg, env); + rep.measurement_complete(std::vector(samples.begin(), 
samples.end())); + + if(!cfg.no_analysis) { + rep.analysis_start(); + auto analysis = detail::analyse(cfg, env, samples.begin(), samples.end()); + rep.analysis_complete(analysis); + } + rep.benchmark_complete(); + } catch (...) { + rep.benchmark_failure(std::current_exception()); } - - rep.benchmark_complete(); } rep.params_complete(); } diff --git a/include/nonius/reporters/html_reporter.h++ b/include/nonius/reporters/html_reporter.h++ index 8e1aa21..537b91c 100644 --- a/include/nonius/reporters/html_reporter.h++ +++ b/include/nonius/reporters/html_reporter.h++ @@ -124,10 +124,14 @@ namespace nonius { for(auto d : r.data) { cpptempl::data_map item; item["name"] = escape(d.first); - item["mean"] = truncate(d.second.analysis.mean.point.count() * magnitude); - item["stddev"] = truncate(d.second.analysis.standard_deviation.point.count() * magnitude); - for(auto e : d.second.samples) - item["samples"].push_back(truncate(e.count() * magnitude)); + cpptempl::data_map data; + if (!d.second.samples.empty()) { + data["mean"] = truncate(d.second.analysis.mean.point.count() * magnitude); + data["stddev"] = truncate(d.second.analysis.standard_deviation.point.count() * magnitude); + for(auto e : d.second.samples) + data["samples"].push_back(truncate(e.count() * magnitude)); + } + item["data"] = data; run_item["benchmarks"].push_back(item); } map["runs"].push_back(run_item); @@ -147,7 +151,8 @@ namespace nonius { mins.reserve(runs.size() * runs.front().data.size()); for (auto&& r : runs) { for(auto d : r.data) { - mins.push_back(*std::min_element(d.second.samples.begin(), d.second.samples.end())); + if (d.second.samples.begin() != d.second.samples.end()) + mins.push_back(*std::min_element(d.second.samples.begin(), d.second.samples.end())); } } auto min = *std::min_element(mins.begin(), mins.end()); diff --git a/tpl/report.tpl.js b/tpl/report.tpl.js index 6914263..0ae23ed 100644 --- a/tpl/report.tpl.js +++ b/tpl/report.tpl.js @@ -13,11 +13,13 @@ benchmarks: [ {% for benchmark in 
run.benchmarks %}{ name: '{$benchmark.name}', - mean: {$benchmark.mean}, - stddev: {$benchmark.stddev}, + {%if benchmark.data %} + mean: {$benchmark.data.mean}, + stddev: {$benchmark.data.stddev}, samples: [ - {% for sample in benchmark.samples %}{$sample}, {% endfor %} + {% for sample in benchmark.data.samples %}{$sample}, {% endfor %} ], + {% endif %} },{% endfor %} ] },{% endfor %} @@ -62,7 +64,7 @@ mode: 'markers', marker: { symbol: i }, y: b.samples, - x: b.samples.map(function (_, i) { return i; }) + x: b.samples && b.samples.map(function (_, i) { return i; }) } }); var layout = { From 55ade0c33781319a0558865b8c426125ec234685 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Pedro=20Bol=C3=ADvar=20Puente?= Date: Fri, 21 Oct 2016 21:13:58 +0200 Subject: [PATCH 2/6] Update auto-generated files --- include/nonius/detail/html_report_template.g.h++ | 11 +++++++---- tpl/html_report.g.tpl | 11 +++++++---- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/include/nonius/detail/html_report_template.g.h++ b/include/nonius/detail/html_report_template.g.h++ index e7eea1c..f80be9b 100644 --- a/include/nonius/detail/html_report_template.g.h++ +++ b/include/nonius/detail/html_report_template.g.h++ @@ -75,6 +75,7 @@ ".select select {\n" " outline: none;\n" " -webkit-appearance: none;\n" +" -moz-appearance: none;\n" " display: block;\n" " padding: 0 3em 0 1.5em;\n" " margin: 0.3em;\n" @@ -298,11 +299,13 @@ " benchmarks: [\n" " {% for benchmark in run.benchmarks %}{\n" " name: '{$benchmark.name}',\n" -" mean: {$benchmark.mean},\n" -" stddev: {$benchmark.stddev},\n" +" {%if benchmark.data %}\n" +" mean: {$benchmark.data.mean},\n" +" stddev: {$benchmark.data.stddev},\n" " samples: [\n" -" {% for sample in benchmark.samples %}{$sample}, {% endfor %}\n" +" {% for sample in benchmark.data.samples %}{$sample}, {% endfor %}\n" " ],\n" +" {% endif %}\n" " },{% endfor %}\n" " ]\n" " },{% endfor %}\n" @@ -347,7 +350,7 @@ " mode: 'markers',\n" " marker: { symbol: i },\n" " y: 
b.samples,\n" -" x: b.samples.map(function (_, i) { return i; })\n" +" x: b.samples && b.samples.map(function (_, i) { return i; })\n" " }\n" " });\n" " var layout = {\n" diff --git a/tpl/html_report.g.tpl b/tpl/html_report.g.tpl index c98480f..5fb5b01 100644 --- a/tpl/html_report.g.tpl +++ b/tpl/html_report.g.tpl @@ -75,6 +75,7 @@ .select select { outline: none; -webkit-appearance: none; + -moz-appearance: none; display: block; padding: 0 3em 0 1.5em; margin: 0.3em; @@ -178,11 +179,13 @@ e.exports=function(t,e,n){function l(n,r){return o.coerce(t,e,i,n,r)}for(var s=! benchmarks: [ {% for benchmark in run.benchmarks %}{ name: '{$benchmark.name}', - mean: {$benchmark.mean}, - stddev: {$benchmark.stddev}, + {%if benchmark.data %} + mean: {$benchmark.data.mean}, + stddev: {$benchmark.data.stddev}, samples: [ - {% for sample in benchmark.samples %}{$sample}, {% endfor %} + {% for sample in benchmark.data.samples %}{$sample}, {% endfor %} ], + {% endif %} },{% endfor %} ] },{% endfor %} @@ -227,7 +230,7 @@ e.exports=function(t,e,n){function l(n,r){return o.coerce(t,e,i,n,r)}for(var s=! mode: 'markers', marker: { symbol: i }, y: b.samples, - x: b.samples.map(function (_, i) { return i; }) + x: b.samples && b.samples.map(function (_, i) { return i; }) } }); var layout = { From 74054da6e65613f98b60ec529e7212f4122a3b4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Pedro=20Bol=C3=ADvar=20Puente?= Date: Sat, 22 Oct 2016 12:35:10 +0200 Subject: [PATCH 3/6] Make html reporter preserve order of benchmark runs Benchmarks are run in the order they are written in the file. If the reporter preserves it, this gives the benchmarks author control over what is the best way to present the results of the benchmarks. However, this was not the case because benchmark results where stored in an `unsorted_map` that has an unspecified order. By storing them in a vector that is populated as the benchmarks are run, we do preserve the run order. 
--- include/nonius/reporters/html_reporter.h++ | 30 +++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/include/nonius/reporters/html_reporter.h++ b/include/nonius/reporters/html_reporter.h++ index 537b91c..67b3220 100644 --- a/include/nonius/reporters/html_reporter.h++ +++ b/include/nonius/reporters/html_reporter.h++ @@ -82,7 +82,7 @@ namespace nonius { } void do_benchmark_start(std::string const& name) override { if(verbose) progress_stream() << "\nbenchmarking " << name << "\n"; - current = runs.back().data.insert({name, {}}).first; + runs.back().data.push_back({name, {}, {}}); } void do_measurement_start(execution_plan plan) override { @@ -91,13 +91,13 @@ namespace nonius { if(verbose) progress_stream() << "collecting " << n_samples << " samples, " << plan.iterations_per_sample << " iterations each, in estimated " << detail::pretty_duration(plan.estimated_duration) << "\n"; } void do_measurement_complete(std::vector const& samples) override { - current->second.samples = samples; + runs.back().data.back().samples = samples; } void do_analysis_complete(sample_analysis const& analysis) override { - current->second.analysis = analysis; + runs.back().data.back().analysis = analysis; } void do_benchmark_failure(std::exception_ptr) override { - error_stream() << current->first << " failed to run successfully\n"; + error_stream() << runs.back().data.back().name << " failed to run successfully\n"; } void do_suite_complete() override { @@ -121,14 +121,14 @@ namespace nonius { params.push_back(item); } run_item["params"] = cpptempl::make_data(params); - for(auto d : r.data) { + for(auto&& d : r.data) { cpptempl::data_map item; - item["name"] = escape(d.first); + item["name"] = escape(d.name); cpptempl::data_map data; - if (!d.second.samples.empty()) { - data["mean"] = truncate(d.second.analysis.mean.point.count() * magnitude); - data["stddev"] = truncate(d.second.analysis.standard_deviation.point.count() * magnitude); - for(auto e : 
d.second.samples) + if (!d.samples.empty()) { + data["mean"] = truncate(d.analysis.mean.point.count() * magnitude); + data["stddev"] = truncate(d.analysis.standard_deviation.point.count() * magnitude); + for(auto e : d.samples) data["samples"].push_back(truncate(e.count() * magnitude)); } item["data"] = data; @@ -150,9 +150,9 @@ namespace nonius { std::vector mins; mins.reserve(runs.size() * runs.front().data.size()); for (auto&& r : runs) { - for(auto d : r.data) { - if (d.second.samples.begin() != d.second.samples.end()) - mins.push_back(*std::min_element(d.second.samples.begin(), d.second.samples.end())); + for(auto&& d : r.data) { + if (d.samples.begin() != d.samples.end()) + mins.push_back(*std::min_element(d.samples.begin(), d.samples.end())); } } auto min = *std::min_element(mins.begin(), mins.end()); @@ -172,13 +172,14 @@ namespace nonius { } struct result_t { + std::string name; std::vector samples; sample_analysis analysis; }; struct run_t { parameters params; - std::unordered_map data; + std::vector data; }; int n_samples; @@ -187,7 +188,6 @@ namespace nonius { bool logarithmic; std::string run_param; std::vector runs; - typename std::unordered_map::iterator current; }; NONIUS_REPORTER("html", html_reporter); From fd8b9681ddb97ef7c8e27142bc5a91f4dd839f9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Pedro=20Bol=C3=ADvar=20Puente?= Date: Wed, 26 Oct 2016 23:50:49 +0200 Subject: [PATCH 4/6] Add toggle to sort entries in html generated report In some cases, specially in the bar graph summary view when many benchmarks are present, it can be useful to sort benchmarks based on their results. This commit adds a checkbox labeled `sorted` to the top bar --where the plot chooser is-- that enables sorting the benchmarks by order. Note that sorting the benchmarks changes the color and checkmarks of the traces. 
It might be better to preserve the initial ones, but I was too lazy to do so--I believe that we would need to manually assign colors and markers instead of relying on plotly auto-assigning them. Here is an example of the results: https://sinusoid.es/misc/nonius/sorting-example8.html --- tpl/report.css | 13 ++++++++++ tpl/report.tpl.js | 65 +++++++++++++++++++++++++++++++++++++---------- 2 files changed, 65 insertions(+), 13 deletions(-) diff --git a/tpl/report.css b/tpl/report.css index 4a95350..3df8b44 100644 --- a/tpl/report.css +++ b/tpl/report.css @@ -74,6 +74,7 @@ body { display: block; padding: 0 3em 0 1.5em; margin: 0.3em; + height: 2em; transition: border-color 0.2s; border: 2px solid #aaa; @@ -92,6 +93,18 @@ body { color: white; } +div.is-sorted { + position: absolute; + top: 0em; + right: 1em; + line-height: 2.8em; +} + +div.is-sorted input { + position: relative; + top: 3px; +} + #plot { position: absolute; min-width: 300px; diff --git a/tpl/report.tpl.js b/tpl/report.tpl.js index 0ae23ed..00a82ad 100644 --- a/tpl/report.tpl.js +++ b/tpl/report.tpl.js @@ -25,7 +25,8 @@ },{% endfor %} ] }; - + var origOrder = data.runs[0].benchmarks.map(function (_, i) { return i; }) + var sortOrder = computeSortedOrder(); var plotdiv = document.getElementById("plot"); window.addEventListener("resize", function() { Plotly.Plots.resize(plotdiv); @@ -36,28 +37,65 @@ chooser.addEventListener("blur", chooser.focus.bind(chooser)); chooser.focus(); + var isSortedBox = document.getElementById("is-sorted"); + isSortedBox.addEventListener("change", choosePlot); + var legendStyle = { font: { family: 'monospace' }, borderwidth: 2, bordercolor: 'black' } + function zeroes(count) { + var arr = [] + while (count --> 0) arr.push(0) + return arr + } + + function computeSortedOrder() { + // We sort each run. Then we compute the "points" of each + // benchmark as the sum of the positions of this benchmkark on + // each run. 
This gives us a rough indication of which + // benchmark is best -- the lower the points, the better. + var runsOrder = data.runs.map(function (r) { + order = r.benchmarks.map(function (_, i) { return i; }) + order.sort(function (a, b) { + return r.benchmarks[a].mean - r.benchmarks[b].mean + }) + return order + }) + var length = data.runs[0].benchmarks.length + var points = runsOrder.reduce(function (acc, r) { + r.forEach(function (elem, idx) { + acc[elem] += idx + }) + return acc + }, zeroes(length)) + var order = data.runs[0].benchmarks.map(function (_, i) { return i; }) + order.sort(function (a, b) { + return points[a] - points[b] + }) + return order + } + function choosePlot() { - var plot = chooser.options[chooser.selectedIndex].value; + var plot = chooser.options[chooser.selectedIndex].value + var order = isSortedBox.checked ? sortOrder : origOrder if (plot == 'summary') { if (data.runs.length > 1) { - plotSummary(); + plotSummary(order); } else { - plotSingleSummary(); + plotSingleSummary(order); } } else { - plotSamples(plot); + plotSamples(plot, order); } } - function plotSamples(plot) { + function plotSamples(plot, order) { var run = data.runs[plot]; - var traces = run.benchmarks.map(function (b, i) { + var traces = order.map(function (i) { + var b = run.benchmarks[i] return { name: b.name, type: 'scatter', @@ -81,10 +119,10 @@ Plotly.newPlot(plotdiv, traces, layout); } - function plotSummary() { - var traces = data.runs[0].benchmarks.map(function (b, i) { + function plotSummary(order) { + var traces = order.map(function (i) { return { - name: b.name, + name: data.runs[0].benchmarks[i].name, type: 'scatter', marker: { symbol: i }, x: data.runs.map(function (r) { return r.params[data.param]; }), @@ -114,12 +152,13 @@ Plotly.newPlot(plotdiv, traces, layout); } - function plotSingleSummary() { - var traces = data.runs[0].benchmarks.map(function (b, i) { + function plotSingleSummary(order) { + var traces = order.map(function (i) { + var b = 
data.runs[0].benchmarks[i] return { type: 'bar', name: b.name, - x: [ 0 ], + x: [ data.title ], y: [ b.mean ], error_y: { type: 'data', From c12f256849ddcfdc84e59bab6c48455834352339 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Pedro=20Bol=C3=ADvar=20Puente?= Date: Wed, 26 Oct 2016 23:56:29 +0200 Subject: [PATCH 5/6] Update auto-generated files --- .../nonius/detail/html_report_template.g.h++ | 82 ++++++++++++++++--- tpl/html_report.g.tpl | 82 ++++++++++++++++--- tpl/html_report.tpl | 4 + 3 files changed, 142 insertions(+), 26 deletions(-) diff --git a/include/nonius/detail/html_report_template.g.h++ b/include/nonius/detail/html_report_template.g.h++ index f80be9b..cb4596c 100644 --- a/include/nonius/detail/html_report_template.g.h++ +++ b/include/nonius/detail/html_report_template.g.h++ @@ -79,6 +79,7 @@ " display: block;\n" " padding: 0 3em 0 1.5em;\n" " margin: 0.3em;\n" +" height: 2em;\n" "\n" " transition: border-color 0.2s;\n" " border: 2px solid #aaa;\n" @@ -97,6 +98,18 @@ " color: white;\n" "}\n" "\n" +"div.is-sorted {\n" +" position: absolute;\n" +" top: 0em;\n" +" right: 1em;\n" +" line-height: 2.8em;\n" +"}\n" +"\n" +"div.is-sorted input {\n" +" position: relative;\n" +" top: 2px;\n" +"}\n" +"\n" "#plot {\n" " position: absolute;\n" " min-width: 300px;\n" @@ -281,6 +294,10 @@ " {% endfor %}\n" " \n" " \n" +"
\n" +" \n" +" \n" +"
\n" " \n" "
\n" "
Generated with nonius
\n" @@ -311,7 +328,8 @@ " },{% endfor %}\n" " ]\n" " };\n" -"\n" +" var origOrder = data.runs[0].benchmarks.map(function (_, i) { return i; })\n" +" var sortOrder = computeSortedOrder();\n" " var plotdiv = document.getElementById(\"plot\");\n" " window.addEventListener(\"resize\", function() {\n" " Plotly.Plots.resize(plotdiv);\n" @@ -322,28 +340,65 @@ " chooser.addEventListener(\"blur\", chooser.focus.bind(chooser));\n" " chooser.focus();\n" "\n" +" var isSortedBox = document.getElementById(\"is-sorted\");\n" +" isSortedBox.addEventListener(\"change\", choosePlot);\n" +"\n" " var legendStyle = {\n" " font: { family: 'monospace' },\n" " borderwidth: 2,\n" " bordercolor: 'black'\n" " }\n" "\n" +" function zeroes(count) {\n" +" var arr = []\n" +" while (count --> 0) arr.push(0)\n" +" return arr\n" +" }\n" +"\n" +" function computeSortedOrder() {\n" +" // We sort each run. Then we compute the \"points\" of each\n" +" // benchmark as the sum of the positions of this benchmkark on\n" +" // each run. This gives us a rough indication of which\n" +" // benchmark is best -- the lower the points, the better.\n" +" var runsOrder = data.runs.map(function (r) {\n" +" order = r.benchmarks.map(function (_, i) { return i; })\n" +" order.sort(function (a, b) {\n" +" return r.benchmarks[a].mean - r.benchmarks[b].mean\n" +" })\n" +" return order\n" +" })\n" +" var length = data.runs[0].benchmarks.length\n" +" var points = runsOrder.reduce(function (acc, r) {\n" +" r.forEach(function (elem, idx) {\n" +" acc[elem] += idx\n" +" })\n" +" return acc\n" +" }, zeroes(length))\n" +" var order = data.runs[0].benchmarks.map(function (_, i) { return i; })\n" +" order.sort(function (a, b) {\n" +" return points[a] - points[b]\n" +" })\n" +" return order\n" +" }\n" +"\n" " function choosePlot() {\n" -" var plot = chooser.options[chooser.selectedIndex].value;\n" +" var plot = chooser.options[chooser.selectedIndex].value\n" +" var order = isSortedBox.checked ? 
sortOrder : origOrder\n" " if (plot == 'summary') {\n" " if (data.runs.length > 1) {\n" -" plotSummary();\n" +" plotSummary(order);\n" " } else {\n" -" plotSingleSummary();\n" +" plotSingleSummary(order);\n" " }\n" " } else {\n" -" plotSamples(plot);\n" +" plotSamples(plot, order);\n" " }\n" " }\n" "\n" -" function plotSamples(plot) {\n" +" function plotSamples(plot, order) {\n" " var run = data.runs[plot];\n" -" var traces = run.benchmarks.map(function (b, i) {\n" +" var traces = order.map(function (i) {\n" +" var b = run.benchmarks[i]\n" " return {\n" " name: b.name,\n" " type: 'scatter',\n" @@ -367,10 +422,10 @@ " Plotly.newPlot(plotdiv, traces, layout);\n" " }\n" "\n" -" function plotSummary() {\n" -" var traces = data.runs[0].benchmarks.map(function (b, i) {\n" +" function plotSummary(order) {\n" +" var traces = order.map(function (i) {\n" " return {\n" -" name: b.name,\n" +" name: data.runs[0].benchmarks[i].name,\n" " type: 'scatter',\n" " marker: { symbol: i },\n" " x: data.runs.map(function (r) { return r.params[data.param]; }),\n" @@ -400,12 +455,13 @@ " Plotly.newPlot(plotdiv, traces, layout);\n" " }\n" "\n" -" function plotSingleSummary() {\n" -" var traces = data.runs[0].benchmarks.map(function (b, i) {\n" +" function plotSingleSummary(order) {\n" +" var traces = order.map(function (i) {\n" +" var b = data.runs[0].benchmarks[i]\n" " return {\n" " type: 'bar',\n" " name: b.name,\n" -" x: [ 0 ],\n" +" x: [ data.title ],\n" " y: [ b.mean ],\n" " error_y: {\n" " type: 'data',\n" diff --git a/tpl/html_report.g.tpl b/tpl/html_report.g.tpl index 5fb5b01..d6cd714 100644 --- a/tpl/html_report.g.tpl +++ b/tpl/html_report.g.tpl @@ -79,6 +79,7 @@ display: block; padding: 0 3em 0 1.5em; margin: 0.3em; + height: 2em; transition: border-color 0.2s; border: 2px solid #aaa; @@ -97,6 +98,18 @@ color: white; } +div.is-sorted { + position: absolute; + top: 0em; + right: 1em; + line-height: 2.8em; +} + +div.is-sorted input { + position: relative; + top: 2px; +} + #plot { 
position: absolute; min-width: 300px; @@ -161,6 +174,10 @@ e.exports=function(t,e,n){function l(n,r){return o.coerce(t,e,i,n,r)}for(var s=! {% endfor %} +
+ + +
@@ -191,7 +208,8 @@ e.exports=function(t,e,n){function l(n,r){return o.coerce(t,e,i,n,r)}for(var s=! },{% endfor %} ] }; - + var origOrder = data.runs[0].benchmarks.map(function (_, i) { return i; }) + var sortOrder = computeSortedOrder(); var plotdiv = document.getElementById("plot"); window.addEventListener("resize", function() { Plotly.Plots.resize(plotdiv); @@ -202,28 +220,65 @@ e.exports=function(t,e,n){function l(n,r){return o.coerce(t,e,i,n,r)}for(var s=! chooser.addEventListener("blur", chooser.focus.bind(chooser)); chooser.focus(); + var isSortedBox = document.getElementById("is-sorted"); + isSortedBox.addEventListener("change", choosePlot); + var legendStyle = { font: { family: 'monospace' }, borderwidth: 2, bordercolor: 'black' } + function zeroes(count) { + var arr = [] + while (count --> 0) arr.push(0) + return arr + } + + function computeSortedOrder() { + // We sort each run. Then we compute the "points" of each + // benchmark as the sum of the positions of this benchmkark on + // each run. This gives us a rough indication of which + // benchmark is best -- the lower the points, the better. + var runsOrder = data.runs.map(function (r) { + order = r.benchmarks.map(function (_, i) { return i; }) + order.sort(function (a, b) { + return r.benchmarks[a].mean - r.benchmarks[b].mean + }) + return order + }) + var length = data.runs[0].benchmarks.length + var points = runsOrder.reduce(function (acc, r) { + r.forEach(function (elem, idx) { + acc[elem] += idx + }) + return acc + }, zeroes(length)) + var order = data.runs[0].benchmarks.map(function (_, i) { return i; }) + order.sort(function (a, b) { + return points[a] - points[b] + }) + return order + } + function choosePlot() { - var plot = chooser.options[chooser.selectedIndex].value; + var plot = chooser.options[chooser.selectedIndex].value + var order = isSortedBox.checked ? 
sortOrder : origOrder if (plot == 'summary') { if (data.runs.length > 1) { - plotSummary(); + plotSummary(order); } else { - plotSingleSummary(); + plotSingleSummary(order); } } else { - plotSamples(plot); + plotSamples(plot, order); } } - function plotSamples(plot) { + function plotSamples(plot, order) { var run = data.runs[plot]; - var traces = run.benchmarks.map(function (b, i) { + var traces = order.map(function (i) { + var b = run.benchmarks[i] return { name: b.name, type: 'scatter', @@ -247,10 +302,10 @@ e.exports=function(t,e,n){function l(n,r){return o.coerce(t,e,i,n,r)}for(var s=! Plotly.newPlot(plotdiv, traces, layout); } - function plotSummary() { - var traces = data.runs[0].benchmarks.map(function (b, i) { + function plotSummary(order) { + var traces = order.map(function (i) { return { - name: b.name, + name: data.runs[0].benchmarks[i].name, type: 'scatter', marker: { symbol: i }, x: data.runs.map(function (r) { return r.params[data.param]; }), @@ -280,12 +335,13 @@ e.exports=function(t,e,n){function l(n,r){return o.coerce(t,e,i,n,r)}for(var s=! Plotly.newPlot(plotdiv, traces, layout); } - function plotSingleSummary() { - var traces = data.runs[0].benchmarks.map(function (b, i) { + function plotSingleSummary(order) { + var traces = order.map(function (i) { + var b = data.runs[0].benchmarks[i] return { type: 'bar', name: b.name, - x: [ 0 ], + x: [ data.title ], y: [ b.mean ], error_y: { type: 'data', diff --git a/tpl/html_report.tpl b/tpl/html_report.tpl index c0102d5..cce7446 100644 --- a/tpl/html_report.tpl +++ b/tpl/html_report.tpl @@ -22,6 +22,10 @@ {% endfor %} +
+ + +
From 65cecd25994a96ee61718e10ff24fc4297788d90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Pedro=20Bol=C3=ADvar=20Puente?= Date: Thu, 23 Feb 2017 12:19:30 +0100 Subject: [PATCH 6/6] Add output summary to html reporter --- include/nonius/reporters/html_reporter.h++ | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/include/nonius/reporters/html_reporter.h++ b/include/nonius/reporters/html_reporter.h++ index 67b3220..139571d 100644 --- a/include/nonius/reporters/html_reporter.h++ +++ b/include/nonius/reporters/html_reporter.h++ @@ -139,7 +139,24 @@ namespace nonius { cpptempl::parse(report_stream(), templ, map); report_stream() << std::flush; - if(verbose) progress_stream() << "done\n"; + if(verbose) { + progress_stream() << "\n\nresult summary (" + << detail::units_for_magnitude(magnitude) + << ")\n"; + for (auto&& r : runs) { + for (auto&& p : r.params) + progress_stream() << "\n " << p.first << " = " << p.second; + progress_stream() << "\n"; + for(auto&& d : r.data) { + progress_stream() << " " << d.name << "\t " + << truncate(d.analysis.mean.point.count() * magnitude) << "\t " + << truncate(d.analysis.standard_deviation.point.count() * magnitude) + << "\n"; + } + } + progress_stream() << "\ndone\n"; + progress_stream() << std::flush; + } } static double truncate(double x) {