98 changes: 98 additions & 0 deletions examples/example8.c++
@@ -0,0 +1,98 @@
#define NONIUS_RUNNER
#include <nonius.h++>

#include <vector>
#include <random>
#include <algorithm>
#include <numeric>     // std::iota
#include <functional>
#include <cstdlib>

NONIUS_PARAM(N, std::size_t{100})
NONIUS_PARAM(QUADRATIC_MAX_N, std::size_t{10000})

template <typename IntT=int>
std::vector<IntT> make_random_vector(std::size_t n)
{
    auto v = std::vector<IntT>(n);
    std::iota(v.begin(), v.end(), IntT{});
    std::shuffle(v.begin(), v.end(), std::default_random_engine{42});
    return v;
}

template <typename Iter>
void bubble_sort(Iter first, Iter last)
{
    using std::swap;
    auto sorted = false;
    if (first == last) return;
    do {
        sorted = true;
        for (auto it = first, nxt = std::next(it);
             nxt != last;
             it = nxt, nxt = std::next(it))
        {
            if (*it > *nxt) {
                swap(*it, *nxt);
                sorted = false;
            }
        }
    } while (!sorted);
}

template <typename Iter>
void insert_sort(Iter first, Iter last)
{
    // selection-style: put the minimum of the remaining range at each position
    using std::swap;
    for (auto it = first; it != last; ++it)
        swap(*it, *std::min_element(it, last));
}

NONIUS_BENCHMARK("std::sort", [](nonius::chronometer meter)
{
auto n = meter.param<N>();
auto v = make_random_vector(n);
auto vs = std::vector<decltype(v)>(meter.runs(), v);
meter.measure([&] (int r) {
std::sort(vs[r].begin(), vs[r].end());
});
})

NONIUS_BENCHMARK("std::qsort", [](nonius::chronometer meter)
{
auto n = meter.param<N>();
auto v = make_random_vector(n);
auto vs = std::vector<decltype(v)>(meter.runs(), v);
meter.measure([&] (int r) {
std::qsort(
&vs[r][0], vs[r].size(), sizeof(int),
[] (const void* a, const void* b) {
return *static_cast<const int*>(a) - *static_cast<const int*>(b);
});
});
})

NONIUS_BENCHMARK("bubble_sort", [](nonius::chronometer meter)
{
auto n = meter.param<N>();
if (n >= meter.param<QUADRATIC_MAX_N>())
nonius::skip();
auto v = make_random_vector(n);
auto vs = std::vector<decltype(v)>(meter.runs(), v);
meter.measure([&] (int r) {
bubble_sort(vs[r].begin(), vs[r].end());
});
})

NONIUS_BENCHMARK("insert_sort", [](nonius::parameters params)
{
auto n = params.get<N>();
if (n >= params.get<QUADRATIC_MAX_N>())
nonius::skip();
return [=](nonius::chronometer meter) {
auto v = make_random_vector(n);
auto vs = std::vector<decltype(v)>(meter.runs(), v);
meter.measure([&] (int r) {
insert_sort(vs[r].begin(), vs[r].end());
});
};
})
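Aside: the benchmarks above use two callable shapes. "std::sort", "std::qsort" and "bubble_sort" pass NONIUS_BENCHMARK a callable that takes nonius::chronometer and reads parameters through meter.param<...>(), while "insert_sort" takes nonius::parameters first and returns the chronometer callable, keeping the parameter-dependent skip decision outside the measured callable. A compact sketch of the two shapes (not part of the diff; it reuses only names defined in this example, and the benchmark names are made up):

// Shape 1: the callable receives the chronometer directly.
NONIUS_BENCHMARK("direct shape", [](nonius::chronometer meter)
{
    auto n = meter.param<N>();
    auto v = make_random_vector(n);
    auto vs = std::vector<decltype(v)>(meter.runs(), v);
    meter.measure([&] (int r) {
        std::sort(vs[r].begin(), vs[r].end());
    });
})

// Shape 2: the callable receives the parameters and returns the real
// benchmark, so it can call nonius::skip() before building any inputs.
NONIUS_BENCHMARK("deferred shape", [](nonius::parameters params)
{
    auto n = params.get<N>();
    if (n >= params.get<QUADRATIC_MAX_N>())
        nonius::skip();
    return [=](nonius::chronometer meter) {
        auto v = make_random_vector(n);
        auto vs = std::vector<decltype(v)>(meter.runs(), v);
        meter.measure([&] (int r) {
            std::sort(vs[r].begin(), vs[r].end());
        });
    };
})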
93 changes: 76 additions & 17 deletions include/nonius/detail/html_report_template.g.h++
@@ -75,9 +75,11 @@
".select select {\n"
" outline: none;\n"
" -webkit-appearance: none;\n"
" -moz-appearance: none;\n"
" display: block;\n"
" padding: 0 3em 0 1.5em;\n"
" margin: 0.3em;\n"
" height: 2em;\n"
"\n"
" transition: border-color 0.2s;\n"
" border: 2px solid #aaa;\n"
@@ -96,6 +98,18 @@
" color: white;\n"
"}\n"
"\n"
"div.is-sorted {\n"
" position: absolute;\n"
" top: 0em;\n"
" right: 1em;\n"
" line-height: 2.8em;\n"
"}\n"
"\n"
"div.is-sorted input {\n"
" position: relative;\n"
" top: 2px;\n"
"}\n"
"\n"
"#plot {\n"
" position: absolute;\n"
" min-width: 300px;\n"
@@ -280,6 +294,10 @@
" {% endfor %}\n"
" </select>\n"
" </div>\n"
" <div class=\"is-sorted\">\n"
" <input id=\"is-sorted\" type=\"checkbox\"/>\n"
" <label for=\"is-sorted\">sorted</label>\n"
" </div>\n"
" </div>\n"
" <div id=\"plot\"></div>\n"
" <div id=\"footer\">Generated with <a href=\"http://flamingdangerzone.com/nonius\">nonius</a></div>\n"
@@ -298,17 +316,20 @@
" benchmarks: [\n"
" {% for benchmark in run.benchmarks %}{\n"
" name: '{$benchmark.name}',\n"
" mean: {$benchmark.mean},\n"
" stddev: {$benchmark.stddev},\n"
" {%if benchmark.data %}\n"
" mean: {$benchmark.data.mean},\n"
" stddev: {$benchmark.data.stddev},\n"
" samples: [\n"
" {% for sample in benchmark.samples %}{$sample}, {% endfor %}\n"
" {% for sample in benchmark.data.samples %}{$sample}, {% endfor %}\n"
" ],\n"
" {% endif %}\n"
" },{% endfor %}\n"
" ]\n"
" },{% endfor %}\n"
" ]\n"
" };\n"
"\n"
" var origOrder = data.runs[0].benchmarks.map(function (_, i) { return i; })\n"
" var sortOrder = computeSortedOrder();\n"
" var plotdiv = document.getElementById(\"plot\");\n"
" window.addEventListener(\"resize\", function() {\n"
" Plotly.Plots.resize(plotdiv);\n"
@@ -319,35 +340,72 @@
" chooser.addEventListener(\"blur\", chooser.focus.bind(chooser));\n"
" chooser.focus();\n"
"\n"
" var isSortedBox = document.getElementById(\"is-sorted\");\n"
" isSortedBox.addEventListener(\"change\", choosePlot);\n"
"\n"
" var legendStyle = {\n"
" font: { family: 'monospace' },\n"
" borderwidth: 2,\n"
" bordercolor: 'black'\n"
" }\n"
"\n"
" function zeroes(count) {\n"
" var arr = []\n"
" while (count --> 0) arr.push(0)\n"
" return arr\n"
" }\n"
"\n"
" function computeSortedOrder() {\n"
" // We sort each run. Then we compute the \"points\" of each\n"
" // benchmark as the sum of the positions of this benchmkark on\n"
" // each run. This gives us a rough indication of which\n"
" // benchmark is best -- the lower the points, the better.\n"
" var runsOrder = data.runs.map(function (r) {\n"
" order = r.benchmarks.map(function (_, i) { return i; })\n"
" order.sort(function (a, b) {\n"
" return r.benchmarks[a].mean - r.benchmarks[b].mean\n"
" })\n"
" return order\n"
" })\n"
" var length = data.runs[0].benchmarks.length\n"
" var points = runsOrder.reduce(function (acc, r) {\n"
" r.forEach(function (elem, idx) {\n"
" acc[elem] += idx\n"
" })\n"
" return acc\n"
" }, zeroes(length))\n"
" var order = data.runs[0].benchmarks.map(function (_, i) { return i; })\n"
" order.sort(function (a, b) {\n"
" return points[a] - points[b]\n"
" })\n"
" return order\n"
" }\n"
"\n"
" function choosePlot() {\n"
" var plot = chooser.options[chooser.selectedIndex].value;\n"
" var plot = chooser.options[chooser.selectedIndex].value\n"
" var order = isSortedBox.checked ? sortOrder : origOrder\n"
" if (plot == 'summary') {\n"
" if (data.runs.length > 1) {\n"
" plotSummary();\n"
" plotSummary(order);\n"
" } else {\n"
" plotSingleSummary();\n"
" plotSingleSummary(order);\n"
" }\n"
" } else {\n"
" plotSamples(plot);\n"
" plotSamples(plot, order);\n"
" }\n"
" }\n"
"\n"
" function plotSamples(plot) {\n"
" function plotSamples(plot, order) {\n"
" var run = data.runs[plot];\n"
" var traces = run.benchmarks.map(function (b, i) {\n"
" var traces = order.map(function (i) {\n"
" var b = run.benchmarks[i]\n"
" return {\n"
" name: b.name,\n"
" type: 'scatter',\n"
" mode: 'markers',\n"
" marker: { symbol: i },\n"
" y: b.samples,\n"
" x: b.samples.map(function (_, i) { return i; })\n"
" x: b.samples && b.samples.map(function (_, i) { return i; })\n"
" }\n"
" });\n"
" var layout = {\n"
@@ -364,10 +422,10 @@
" Plotly.newPlot(plotdiv, traces, layout);\n"
" }\n"
"\n"
" function plotSummary() {\n"
" var traces = data.runs[0].benchmarks.map(function (b, i) {\n"
" function plotSummary(order) {\n"
" var traces = order.map(function (i) {\n"
" return {\n"
" name: b.name,\n"
" name: data.runs[0].benchmarks[i].name,\n"
" type: 'scatter',\n"
" marker: { symbol: i },\n"
" x: data.runs.map(function (r) { return r.params[data.param]; }),\n"
@@ -397,12 +455,13 @@
" Plotly.newPlot(plotdiv, traces, layout);\n"
" }\n"
"\n"
" function plotSingleSummary() {\n"
" var traces = data.runs[0].benchmarks.map(function (b, i) {\n"
" function plotSingleSummary(order) {\n"
" var traces = order.map(function (i) {\n"
" var b = data.runs[0].benchmarks[i]\n"
" return {\n"
" type: 'bar',\n"
" name: b.name,\n"
" x: [ 0 ],\n"
" x: [ data.title ],\n"
" y: [ b.mean ],\n"
" error_y: {\n"
" type: 'data',\n"
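For reference, the ordering computed by computeSortedOrder above is a rank sum: each run's benchmarks are sorted by mean, each benchmark accumulates its position in every run as "points", and benchmarks are finally ordered by total points, fewest first. A standalone C++ sketch of the same computation on made-up means (the numbers are hypothetical, not from any real run):

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main()
{
    // hypothetical mean times for 3 benchmarks across 2 runs
    std::vector<std::vector<double>> runs = { { 5.0, 2.0, 9.0 },
                                              { 4.0, 6.0, 9.0 } };
    auto n = runs[0].size();
    std::vector<std::size_t> points(n, 0);
    for (auto const& means : runs) {
        // order of this run's benchmarks, fastest first
        std::vector<std::size_t> order(n);
        std::iota(order.begin(), order.end(), std::size_t{0});
        std::sort(order.begin(), order.end(),
                  [&](std::size_t a, std::size_t b) { return means[a] < means[b]; });
        // each benchmark collects its position in this run
        for (std::size_t pos = 0; pos < n; ++pos)
            points[order[pos]] += pos;
    }
    // final order: lowest total points first
    std::vector<std::size_t> final_order(n);
    std::iota(final_order.begin(), final_order.end(), std::size_t{0});
    std::sort(final_order.begin(), final_order.end(),
              [&](std::size_t a, std::size_t b) { return points[a] < points[b]; });
    for (auto i : final_order)
        std::printf("benchmark %zu: %zu points\n", i, points[i]);
}

With these numbers the totals are {1, 1, 4}, so benchmarks 0 and 1 tie ahead of benchmark 2.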
46 changes: 22 additions & 24 deletions include/nonius/go.h++
@@ -57,14 +57,14 @@ namespace nonius {
}
};

template <typename Fun>
detail::CompleteType<detail::ResultOf<Fun()>> user_code(reporter& rep, Fun&& fun) {
try {
return detail::complete_invoke(std::forward<Fun>(fun));
} catch(...) {
rep.benchmark_failure(std::current_exception());
throw benchmark_user_error();
struct skip_error : virtual std::exception {
const char* what() const NONIUS_NOEXCEPT override {
return "benchmark was skipped";
}
};

inline void skip() {
throw skip_error{};
}

inline std::vector<parameters> generate_params(param_configuration cfg) {
@@ -117,24 +117,22 @@ namespace nonius {
rep.params_start(params);
for (auto&& bench : benchmarks) {
rep.benchmark_start(bench.name);

auto plan = user_code(rep, [&]{
return bench.template prepare<Clock>(cfg, params, env);
});

rep.measurement_start(plan);
auto samples = user_code(rep, [&]{
return plan.template run<Clock>(cfg, env);
});
rep.measurement_complete(std::vector<fp_seconds>(samples.begin(), samples.end()));

if(!cfg.no_analysis) {
rep.analysis_start();
auto analysis = detail::analyse(cfg, env, samples.begin(), samples.end());
rep.analysis_complete(analysis);
try {
auto plan = bench.template prepare<Clock>(cfg, params, env);

rep.measurement_start(plan);
auto samples = plan.template run<Clock>(cfg, env);
rep.measurement_complete(std::vector<fp_seconds>(samples.begin(), samples.end()));

if(!cfg.no_analysis) {
rep.analysis_start();
auto analysis = detail::analyse(cfg, env, samples.begin(), samples.end());
rep.analysis_complete(analysis);
}
rep.benchmark_complete();
} catch (...) {
rep.benchmark_failure(std::current_exception());
}

rep.benchmark_complete();
}
rep.params_complete();
}