diff --git a/examples/example8.c++ b/examples/example8.c++
new file mode 100644
index 0000000..ca4cf1c
--- /dev/null
+++ b/examples/example8.c++
@@ -0,0 +1,98 @@
+#define NONIUS_RUNNER
+#include <nonius/nonius_single.h++>
+
+#include <algorithm>
+#include <cstdlib>
+#include <numeric>
+#include <random>
+#include <vector>
+
+NONIUS_PARAM(N, std::size_t{100})
+NONIUS_PARAM(QUADRATIC_MAX_N, std::size_t{10000})
+
+template <typename IntT>
+std::vector<IntT> make_random_vector(std::size_t n)
+{
+    auto v = std::vector<IntT>(n);
+    std::iota(v.begin(), v.end(), IntT{});
+    std::shuffle(v.begin(), v.end(), std::default_random_engine{42});
+    return v;
+}
+
+template <typename Iter>
+void bubble_sort(Iter first, Iter last)
+{
+    using std::swap;
+    auto sorted = false;
+    if (first == last) return;
+    do {
+        sorted = true;
+        for (auto it = first, nxt = std::next(it);
+             nxt != last;
+             it = nxt, nxt = std::next(it))
+        {
+            if (*it > *nxt) {
+                swap(*it, *nxt);
+                sorted = false;
+            }
+        }
+    } while (!sorted);
+}
+
+template <typename Iter>
+void insert_sort(Iter first, Iter last)
+{
+    using std::swap;
+    for (auto it = first; it != last; ++it)
+        swap(*it, *std::min_element(it, last));
+}
+
+NONIUS_BENCHMARK("std::sort", [](nonius::chronometer meter)
+{
+    auto n = meter.param<N>();
+    auto v = make_random_vector<int>(n);
+    auto vs = std::vector<std::vector<int>>(meter.runs(), v);
+    meter.measure([&] (int r) {
+        std::sort(vs[r].begin(), vs[r].end());
+    });
+})
+
+NONIUS_BENCHMARK("std::qsort", [](nonius::chronometer meter)
+{
+    auto n = meter.param<N>();
+    auto v = make_random_vector<int>(n);
+    auto vs = std::vector<std::vector<int>>(meter.runs(), v);
+    meter.measure([&] (int r) {
+        std::qsort(
+            &vs[r][0], vs[r].size(), sizeof(int),
+            [] (const void* a, const void* b) {
+                return *static_cast<const int*>(a) - *static_cast<const int*>(b);
+            });
+    });
+})
+
+NONIUS_BENCHMARK("bubble_sort", [](nonius::chronometer meter)
+{
+    auto n = meter.param<N>();
+    if (n >= meter.param<QUADRATIC_MAX_N>())
+        nonius::skip();
+    auto v = make_random_vector<int>(n);
+    auto vs = std::vector<std::vector<int>>(meter.runs(), v);
+    meter.measure([&] (int r) {
+        bubble_sort(vs[r].begin(), vs[r].end());
+    });
+})
+
+NONIUS_BENCHMARK("insert_sort", [](nonius::parameters params)
+{
+    auto n = params.get<N>();
+    if (n >= params.get<QUADRATIC_MAX_N>())
+        nonius::skip();
+    return [=](nonius::chronometer meter) {
+        auto v = make_random_vector<int>(n);
+        auto vs = std::vector<std::vector<int>>(meter.runs(), v);
+        meter.measure([&] (int r) {
+            insert_sort(vs[r].begin(), vs[r].end());
+        });
+    };
+})
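Note: the example above exercises the parameter machinery end to end. NONIUS_PARAM declares an axis with a default value, meter.param<TAG>() (or params.get<TAG>() in the setup-lambda form) reads the current value, and nonius::skip() opts a benchmark out of combinations that would take too long. A minimal sketch of the same pattern, assuming only the API visible in this diff — the SIZE axis and the benchmark name are invented for illustration:

    #define NONIUS_RUNNER
    #include <nonius/nonius_single.h++>

    #include <numeric>
    #include <vector>

    NONIUS_PARAM(SIZE, std::size_t{1000})        // hypothetical axis, default 1000

    NONIUS_BENCHMARK("accumulate", [](nonius::chronometer meter)
    {
        auto n = meter.param<SIZE>();            // current value of the SIZE axis
        auto v = std::vector<int>(n, 1);
        meter.measure([&] {                      // only this lambda is timed
            return std::accumulate(v.begin(), v.end(), 0);
        });
    })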
diff --git a/include/nonius/detail/html_report_template.g.h++ b/include/nonius/detail/html_report_template.g.h++
index e7eea1c..cb4596c 100644
--- a/include/nonius/detail/html_report_template.g.h++
+++ b/include/nonius/detail/html_report_template.g.h++
@@ -75,9 +75,11 @@
 ".select select {\n"
 "    outline: none;\n"
 "    -webkit-appearance: none;\n"
+"    -moz-appearance: none;\n"
 "    display: block;\n"
 "    padding: 0 3em 0 1.5em;\n"
 "    margin: 0.3em;\n"
+"    height: 2em;\n"
 "\n"
 "    transition: border-color 0.2s;\n"
 "    border: 2px solid #aaa;\n"
@@ -96,6 +98,18 @@
 "    color: white;\n"
 "}\n"
 "\n"
+"div.is-sorted {\n"
+"    position: absolute;\n"
+"    top: 0em;\n"
+"    right: 1em;\n"
+"    line-height: 2.8em;\n"
+"}\n"
+"\n"
+"div.is-sorted input {\n"
+"    position: relative;\n"
+"    top: 2px;\n"
+"}\n"
+"\n"
 "#plot {\n"
 "    position: absolute;\n"
 "    min-width: 300px;\n"
@@ -280,6 +294,10 @@
 "            {% endfor %}\n"
 "        </select>\n"
 "    </div>\n"
+"    <div class=\"is-sorted\">\n"
+"        <input type=\"checkbox\" id=\"is-sorted\"/>\n"
+"        <label for=\"is-sorted\">sorted</label>\n"
+"    </div>\n"
 "    <div id=\"plot\"></div>\n"
 "    <footer>Generated with <a href=\"https://nonius.io\">nonius</a></footer>\n"
@@ -298,17 +316,20 @@
 "        benchmarks: [\n"
 "            {% for benchmark in run.benchmarks %}{\n"
 "                name: '{$benchmark.name}',\n"
-"                mean: {$benchmark.mean},\n"
-"                stddev: {$benchmark.stddev},\n"
+"                {%if benchmark.data %}\n"
+"                mean: {$benchmark.data.mean},\n"
+"                stddev: {$benchmark.data.stddev},\n"
 "                samples: [\n"
-"                    {% for sample in benchmark.samples %}{$sample}, {% endfor %}\n"
+"                    {% for sample in benchmark.data.samples %}{$sample}, {% endfor %}\n"
 "                ],\n"
+"                {% endif %}\n"
 "            },{% endfor %}\n"
 "        ]\n"
 "    },{% endfor %}\n"
 "    ]\n"
 "};\n"
-"\n"
+"var origOrder = data.runs[0].benchmarks.map(function (_, i) { return i; })\n"
+"var sortOrder = computeSortedOrder();\n"
 "var plotdiv = document.getElementById(\"plot\");\n"
 "window.addEventListener(\"resize\", function() {\n"
 "    Plotly.Plots.resize(plotdiv);\n"
@@ -319,35 +340,72 @@
 "chooser.addEventListener(\"blur\", chooser.focus.bind(chooser));\n"
 "chooser.focus();\n"
 "\n"
+"var isSortedBox = document.getElementById(\"is-sorted\");\n"
+"isSortedBox.addEventListener(\"change\", choosePlot);\n"
+"\n"
 "var legendStyle = {\n"
 "    font: { family: 'monospace' },\n"
 "    borderwidth: 2,\n"
 "    bordercolor: 'black'\n"
 "}\n"
 "\n"
+"function zeroes(count) {\n"
+"    var arr = []\n"
+"    while (count --> 0) arr.push(0)\n"
+"    return arr\n"
+"}\n"
+"\n"
+"function computeSortedOrder() {\n"
+"    // We sort each run. Then we compute the \"points\" of each\n"
+"    // benchmark as the sum of the positions of this benchmark on\n"
+"    // each run. This gives us a rough indication of which\n"
+"    // benchmark is best -- the lower the points, the better.\n"
+"    var runsOrder = data.runs.map(function (r) {\n"
+"        var order = r.benchmarks.map(function (_, i) { return i; })\n"
+"        order.sort(function (a, b) {\n"
+"            return r.benchmarks[a].mean - r.benchmarks[b].mean\n"
+"        })\n"
+"        return order\n"
+"    })\n"
+"    var length = data.runs[0].benchmarks.length\n"
+"    var points = runsOrder.reduce(function (acc, r) {\n"
+"        r.forEach(function (elem, idx) {\n"
+"            acc[elem] += idx\n"
+"        })\n"
+"        return acc\n"
+"    }, zeroes(length))\n"
+"    var order = data.runs[0].benchmarks.map(function (_, i) { return i; })\n"
+"    order.sort(function (a, b) {\n"
+"        return points[a] - points[b]\n"
+"    })\n"
+"    return order\n"
+"}\n"
+"\n"
 "function choosePlot() {\n"
-"    var plot = chooser.options[chooser.selectedIndex].value;\n"
+"    var plot = chooser.options[chooser.selectedIndex].value\n"
+"    var order = isSortedBox.checked ? sortOrder : origOrder\n"
 "    if (plot == 'summary') {\n"
 "        if (data.runs.length > 1) {\n"
-"            plotSummary();\n"
+"            plotSummary(order);\n"
 "        } else {\n"
-"            plotSingleSummary();\n"
+"            plotSingleSummary(order);\n"
 "        }\n"
 "    } else {\n"
-"        plotSamples(plot);\n"
+"        plotSamples(plot, order);\n"
 "    }\n"
 "}\n"
 "\n"
-"function plotSamples(plot) {\n"
+"function plotSamples(plot, order) {\n"
 "    var run = data.runs[plot];\n"
-"    var traces = run.benchmarks.map(function (b, i) {\n"
+"    var traces = order.map(function (i) {\n"
+"        var b = run.benchmarks[i]\n"
 "        return {\n"
 "            name: b.name,\n"
 "            type: 'scatter',\n"
 "            mode: 'markers',\n"
 "            marker: { symbol: i },\n"
 "            y: b.samples,\n"
-"            x: b.samples.map(function (_, i) { return i; })\n"
+"            x: b.samples && b.samples.map(function (_, i) { return i; })\n"
 "        }\n"
 "    });\n"
 "    var layout = {\n"
@@ -364,10 +422,10 @@
 "    Plotly.newPlot(plotdiv, traces, layout);\n"
 "}\n"
 "\n"
-"function plotSummary() {\n"
-"    var traces = data.runs[0].benchmarks.map(function (b, i) {\n"
+"function plotSummary(order) {\n"
+"    var traces = order.map(function (i) {\n"
 "        return {\n"
-"            name: b.name,\n"
+"            name: data.runs[0].benchmarks[i].name,\n"
 "            type: 'scatter',\n"
 "            marker: { symbol: i },\n"
 "            x: data.runs.map(function (r) { return r.params[data.param]; }),\n"
@@ -397,12 +455,13 @@
 "    Plotly.newPlot(plotdiv, traces, layout);\n"
 "}\n"
 "\n"
-"function plotSingleSummary() {\n"
-"    var traces = data.runs[0].benchmarks.map(function (b, i) {\n"
+"function plotSingleSummary(order) {\n"
+"    var traces = order.map(function (i) {\n"
+"        var b = data.runs[0].benchmarks[i]\n"
 "        return {\n"
 "            type: 'bar',\n"
 "            name: b.name,\n"
-"            x: [ 0 ],\n"
+"            x: [ data.title ],\n"
 "            y: [ b.mean ],\n"
 "            error_y: {\n"
 "                type: 'data',\n"
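Note: computeSortedOrder() above is a small rank aggregation. Each run orders the benchmarks by mean, each benchmark scores the sum of its positions across runs, and the final order sorts by ascending score, so a benchmark that is consistently fast floats to the top even when no single run is decisive. The same algorithm in C++, as a hedged sketch — sorted_order and run_means are illustrative names, not nonius API:

    #include <algorithm>
    #include <cstddef>
    #include <numeric>
    #include <vector>

    // run_means[r][b] is the mean of benchmark b in run r; the result is the
    // benchmark indices ordered best-first by summed per-run rank.
    std::vector<std::size_t> sorted_order(std::vector<std::vector<double>> const& run_means)
    {
        auto n = run_means.front().size();
        auto points = std::vector<std::size_t>(n, 0);
        for (auto const& means : run_means) {
            auto order = std::vector<std::size_t>(n);
            std::iota(order.begin(), order.end(), std::size_t{0});
            std::sort(order.begin(), order.end(),
                      [&](std::size_t a, std::size_t b) { return means[a] < means[b]; });
            for (std::size_t rank = 0; rank < n; ++rank)
                points[order[rank]] += rank;          // accumulate this run's rank
        }
        auto order = std::vector<std::size_t>(n);
        std::iota(order.begin(), order.end(), std::size_t{0});
        std::sort(order.begin(), order.end(),
                  [&](std::size_t a, std::size_t b) { return points[a] < points[b]; });
        return order;
    }

Summing ranks rather than raw means keeps one outlier run (say, a huge N where everything is slow) from dominating the order, since each run contributes the same total weight.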
diff --git a/include/nonius/go.h++ b/include/nonius/go.h++
index b4994c1..20056e0 100644
--- a/include/nonius/go.h++
+++ b/include/nonius/go.h++
@@ -57,14 +57,14 @@ namespace nonius {
         }
     };
 
-    template <typename Fun>
-    detail::CompleteType<detail::ResultOf<Fun>> user_code(reporter& rep, Fun&& fun) {
-        try {
-            return detail::complete_invoke(std::forward<Fun>(fun));
-        } catch(...) {
-            rep.benchmark_failure(std::current_exception());
-            throw benchmark_user_error();
+    struct skip_error : virtual std::exception {
+        const char* what() const NONIUS_NOEXCEPT override {
+            return "benchmark was skipped";
         }
+    };
+
+    inline void skip() {
+        throw skip_error{};
     }
 
     inline std::vector<parameters> generate_params(param_configuration cfg) {
@@ -117,24 +117,22 @@
             rep.params_start(params);
             for (auto&& bench : benchmarks) {
                 rep.benchmark_start(bench.name);
-
-                auto plan = user_code(rep, [&]{
-                    return bench.template prepare<Clock>(cfg, params, env);
-                });
-
-                rep.measurement_start(plan);
-                auto samples = user_code(rep, [&]{
-                    return plan.template run<Clock>(cfg, env);
-                });
-                rep.measurement_complete(std::vector<fp_seconds>(samples.begin(), samples.end()));
-
-                if(!cfg.no_analysis) {
-                    rep.analysis_start();
-                    auto analysis = detail::analyse(cfg, env, samples.begin(), samples.end());
-                    rep.analysis_complete(analysis);
+                try {
+                    auto plan = bench.template prepare<Clock>(cfg, params, env);
+
+                    rep.measurement_start(plan);
+                    auto samples = plan.template run<Clock>(cfg, env);
+                    rep.measurement_complete(std::vector<fp_seconds>(samples.begin(), samples.end()));
+
+                    if(!cfg.no_analysis) {
+                        rep.analysis_start();
+                        auto analysis = detail::analyse(cfg, env, samples.begin(), samples.end());
+                        rep.analysis_complete(analysis);
+                    }
+                    rep.benchmark_complete();
+                } catch (...) {
+                    rep.benchmark_failure(std::current_exception());
                 }
-
-                rep.benchmark_complete();
             }
             rep.params_complete();
         }
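Note: the deleted user_code() wrapper rethrew benchmark_user_error and aborted the whole suite; the new loop in go.h++ instead wraps each benchmark's prepare/run/analyse in one try/catch, reports whatever was thrown — including the skip_error behind nonius::skip() — and carries on with the next benchmark. From a benchmark author's point of view the contract looks like this (a sketch reusing the N/QUADRATIC_MAX_N axes and helpers from example8.c++; the benchmark name is invented):

    NONIUS_BENCHMARK("bounded_bubble_sort", [](nonius::chronometer meter)
    {
        auto n = meter.param<N>();
        if (n >= meter.param<QUADRATIC_MAX_N>())
            nonius::skip();                  // throws skip_error; go.h++ catches it,
                                             // calls rep.benchmark_failure(), and
                                             // moves on to the next benchmark
        auto v = make_random_vector<int>(n);
        meter.measure([&] {
            bubble_sort(v.begin(), v.end()); // sketch only: v stays sorted after
                                             // the first timed run
        });
    })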
diff --git a/include/nonius/reporters/html_reporter.h++ b/include/nonius/reporters/html_reporter.h++
index 8e1aa21..139571d 100644
--- a/include/nonius/reporters/html_reporter.h++
+++ b/include/nonius/reporters/html_reporter.h++
@@ -82,7 +82,7 @@
         }
         void do_benchmark_start(std::string const& name) override {
            if(verbose) progress_stream() << "\nbenchmarking " << name << "\n";
-            current = runs.back().data.insert({name, {}}).first;
+            runs.back().data.push_back({name, {}, {}});
         }
 
         void do_measurement_start(execution_plan<fp_seconds> plan) override {
@@ -91,13 +91,13 @@
             if(verbose) progress_stream() << "collecting " << n_samples << " samples, " << plan.iterations_per_sample << " iterations each, in estimated " << detail::pretty_duration(plan.estimated_duration) << "\n";
         }
         void do_measurement_complete(std::vector<fp_seconds> const& samples) override {
-            current->second.samples = samples;
+            runs.back().data.back().samples = samples;
         }
         void do_analysis_complete(sample_analysis<fp_seconds> const& analysis) override {
-            current->second.analysis = analysis;
+            runs.back().data.back().analysis = analysis;
         }
         void do_benchmark_failure(std::exception_ptr) override {
-            error_stream() << current->first << " failed to run successfully\n";
+            error_stream() << runs.back().data.back().name << " failed to run successfully\n";
         }
 
         void do_suite_complete() override {
@@ -121,13 +121,17 @@
                     params.push_back(item);
                 }
                 run_item["params"] = cpptempl::make_data(params);
-                for(auto d : r.data) {
+                for(auto&& d : r.data) {
                     cpptempl::data_map item;
-                    item["name"] = escape(d.first);
-                    item["mean"] = truncate(d.second.analysis.mean.point.count() * magnitude);
-                    item["stddev"] = truncate(d.second.analysis.standard_deviation.point.count() * magnitude);
-                    for(auto e : d.second.samples)
-                        item["samples"].push_back(truncate(e.count() * magnitude));
+                    item["name"] = escape(d.name);
+                    cpptempl::data_map data;
+                    if (!d.samples.empty()) {
+                        data["mean"] = truncate(d.analysis.mean.point.count() * magnitude);
+                        data["stddev"] = truncate(d.analysis.standard_deviation.point.count() * magnitude);
+                        for(auto e : d.samples)
+                            data["samples"].push_back(truncate(e.count() * magnitude));
+                    }
+                    item["data"] = data;
                     run_item["benchmarks"].push_back(item);
                 }
                 map["runs"].push_back(run_item);
@@ -135,7 +139,24 @@
 
             cpptempl::parse(report_stream(), templ, map);
             report_stream() << std::flush;
-            if(verbose) progress_stream() << "done\n";
+            if(verbose) {
+                progress_stream() << "\n\nresult summary ("
+                                  << detail::units_for_magnitude(magnitude)
+                                  << ")\n";
+                for (auto&& r : runs) {
+                    for (auto&& p : r.params)
+                        progress_stream() << "\n  " << p.first << " = " << p.second;
+                    progress_stream() << "\n";
+                    for(auto&& d : r.data) {
+                        progress_stream() << "    " << d.name << "\t "
+                                          << truncate(d.analysis.mean.point.count() * magnitude) << "\t "
+                                          << truncate(d.analysis.standard_deviation.point.count() * magnitude)
+                                          << "\n";
+                    }
+                }
+                progress_stream() << "\ndone\n";
+                progress_stream() << std::flush;
+            }
         }
 
         static double truncate(double x) {
@@ -146,8 +167,9 @@
             std::vector<fp_seconds> mins;
             mins.reserve(runs.size() * runs.front().data.size());
             for (auto&& r : runs) {
-                for(auto d : r.data) {
-                    mins.push_back(*std::min_element(d.second.samples.begin(), d.second.samples.end()));
+                for(auto&& d : r.data) {
+                    if (d.samples.begin() != d.samples.end())
+                        mins.push_back(*std::min_element(d.samples.begin(), d.samples.end()));
                 }
             }
             auto min = *std::min_element(mins.begin(), mins.end());
@@ -167,13 +189,14 @@
         }
 
         struct result_t {
+            std::string name;
             std::vector<fp_seconds> samples;
             sample_analysis<fp_seconds> analysis;
         };
 
         struct run_t {
             parameters params;
-            std::unordered_map<std::string, result_t> data;
+            std::vector<result_t> data;
         };
 
         int n_samples;
@@ -182,7 +205,6 @@
         bool logarithmic;
         std::string run_param;
         std::vector<run_t> runs;
-        typename std::unordered_map<std::string, result_t>::iterator current;
     };
 
     NONIUS_REPORTER("html", html_reporter);
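Note: this is the reporter-side counterpart of the skip change. Results move from a name-keyed std::unordered_map to a std::vector<result_t> that carries the name itself, so benchmarks render in execution order rather than hash order, and a failed or skipped benchmark keeps its slot with empty samples — exactly what the template's new {%if benchmark.data %} guard tests. A reduced illustration, with the types cut down to the essentials (not the real nonius definitions):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct result_t {
        std::string name;
        std::vector<double> samples;   // stays empty when the benchmark failed/skipped
    };

    int main()
    {
        auto data = std::vector<result_t>{};
        data.push_back({"std::sort", {1.0, 1.1, 0.9}});
        data.push_back({"bubble_sort", {}});   // skipped: slot kept, but no data
        // iteration is stable and follows execution order, unlike an
        // unordered_map, whose iteration order is unspecified
        for (auto&& d : data)
            std::printf("%s%s\n", d.name.c_str(),
                        d.samples.empty() ? " (no data)" : "");
    }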
diff --git a/tpl/html_report.g.tpl b/tpl/html_report.g.tpl
index c98480f..d6cd714 100644
--- a/tpl/html_report.g.tpl
+++ b/tpl/html_report.g.tpl
@@ -75,9 +75,11 @@
 .select select {
     outline: none;
     -webkit-appearance: none;
+    -moz-appearance: none;
     display: block;
     padding: 0 3em 0 1.5em;
     margin: 0.3em;
+    height: 2em;
 
     transition: border-color 0.2s;
     border: 2px solid #aaa;
@@ -96,6 +98,18 @@
     color: white;
 }
 
+div.is-sorted {
+    position: absolute;
+    top: 0em;
+    right: 1em;
+    line-height: 2.8em;
+}
+
+div.is-sorted input {
+    position: relative;
+    top: 2px;
+}
+
 #plot {
     position: absolute;
     min-width: 300px;
@@ -160,6 +174,10 @@ e.exports=function(t,e,n){function l(n,r){return o.coerce(t,e,i,n,r)}for(var s=!
             {% endfor %}
         </select>
     </div>
+    <div class="is-sorted">
+        <input type="checkbox" id="is-sorted"/>
+        <label for="is-sorted">sorted</label>
+    </div>
     <div id="plot"></div>
 
@@ -178,17 +196,20 @@
         benchmarks: [
             {% for benchmark in run.benchmarks %}{
                 name: '{$benchmark.name}',
-                mean: {$benchmark.mean},
-                stddev: {$benchmark.stddev},
+                {%if benchmark.data %}
+                mean: {$benchmark.data.mean},
+                stddev: {$benchmark.data.stddev},
                 samples: [
-                    {% for sample in benchmark.samples %}{$sample}, {% endfor %}
+                    {% for sample in benchmark.data.samples %}{$sample}, {% endfor %}
                 ],
+                {% endif %}
             },{% endfor %}
         ]
     },{% endfor %}
     ]
 };
-
+var origOrder = data.runs[0].benchmarks.map(function (_, i) { return i; })
+var sortOrder = computeSortedOrder();
 var plotdiv = document.getElementById("plot");
 window.addEventListener("resize", function() {
     Plotly.Plots.resize(plotdiv);
@@ -199,35 +220,72 @@
 chooser.addEventListener("blur", chooser.focus.bind(chooser));
 chooser.focus();
 
+var isSortedBox = document.getElementById("is-sorted");
+isSortedBox.addEventListener("change", choosePlot);
+
 var legendStyle = {
     font: { family: 'monospace' },
     borderwidth: 2,
     bordercolor: 'black'
 }
 
+function zeroes(count) {
+    var arr = []
+    while (count --> 0) arr.push(0)
+    return arr
+}
+
+function computeSortedOrder() {
+    // We sort each run. Then we compute the "points" of each
+    // benchmark as the sum of the positions of this benchmark on
+    // each run. This gives us a rough indication of which
+    // benchmark is best -- the lower the points, the better.
+    var runsOrder = data.runs.map(function (r) {
+        var order = r.benchmarks.map(function (_, i) { return i; })
+        order.sort(function (a, b) {
+            return r.benchmarks[a].mean - r.benchmarks[b].mean
+        })
+        return order
+    })
+    var length = data.runs[0].benchmarks.length
+    var points = runsOrder.reduce(function (acc, r) {
+        r.forEach(function (elem, idx) {
+            acc[elem] += idx
+        })
+        return acc
+    }, zeroes(length))
+    var order = data.runs[0].benchmarks.map(function (_, i) { return i; })
+    order.sort(function (a, b) {
+        return points[a] - points[b]
+    })
+    return order
+}
+
 function choosePlot() {
-    var plot = chooser.options[chooser.selectedIndex].value;
+    var plot = chooser.options[chooser.selectedIndex].value
+    var order = isSortedBox.checked ? sortOrder : origOrder
     if (plot == 'summary') {
         if (data.runs.length > 1) {
-            plotSummary();
+            plotSummary(order);
         } else {
-            plotSingleSummary();
+            plotSingleSummary(order);
         }
     } else {
-        plotSamples(plot);
+        plotSamples(plot, order);
     }
 }
 
-function plotSamples(plot) {
+function plotSamples(plot, order) {
     var run = data.runs[plot];
-    var traces = run.benchmarks.map(function (b, i) {
+    var traces = order.map(function (i) {
+        var b = run.benchmarks[i]
         return {
             name: b.name,
             type: 'scatter',
             mode: 'markers',
             marker: { symbol: i },
             y: b.samples,
-            x: b.samples.map(function (_, i) { return i; })
+            x: b.samples && b.samples.map(function (_, i) { return i; })
         }
     });
     var layout = {
@@ -244,10 +302,10 @@
     Plotly.newPlot(plotdiv, traces, layout);
 }
 
-function plotSummary() {
-    var traces = data.runs[0].benchmarks.map(function (b, i) {
+function plotSummary(order) {
+    var traces = order.map(function (i) {
         return {
-            name: b.name,
+            name: data.runs[0].benchmarks[i].name,
             type: 'scatter',
             marker: { symbol: i },
             x: data.runs.map(function (r) { return r.params[data.param]; }),
@@ -277,12 +335,13 @@
     Plotly.newPlot(plotdiv, traces, layout);
 }
 
-function plotSingleSummary() {
-    var traces = data.runs[0].benchmarks.map(function (b, i) {
+function plotSingleSummary(order) {
+    var traces = order.map(function (i) {
+        var b = data.runs[0].benchmarks[i]
         return {
             type: 'bar',
             name: b.name,
-            x: [ 0 ],
+            x: [ data.title ],
             y: [ b.mean ],
             error_y: {
                 type: 'data',
diff --git a/tpl/html_report.tpl b/tpl/html_report.tpl
index c0102d5..cce7446 100644
--- a/tpl/html_report.tpl
+++ b/tpl/html_report.tpl
@@ -22,6 +22,10 @@
         {% endfor %}
         </select>
     </div>
+    <div class="is-sorted">
+        <input type="checkbox" id="is-sorted"/>
+        <label for="is-sorted">sorted</label>
+    </div>
     <div id="plot"></div>
 
diff --git a/tpl/report.css b/tpl/report.css
index 4a95350..3df8b44 100644
--- a/tpl/report.css
+++ b/tpl/report.css
@@ -74,6 +74,7 @@ body {
     display: block;
     padding: 0 3em 0 1.5em;
     margin: 0.3em;
+    height: 2em;
 
     transition: border-color 0.2s;
     border: 2px solid #aaa;
@@ -92,6 +93,18 @@ body {
     color: white;
 }
 
+div.is-sorted {
+    position: absolute;
+    top: 0em;
+    right: 1em;
+    line-height: 2.8em;
+}
+
+div.is-sorted input {
+    position: relative;
+    top: 3px;
+}
+
 #plot {
     position: absolute;
     min-width: 300px;
diff --git a/tpl/report.tpl.js b/tpl/report.tpl.js
index 6914263..00a82ad 100644
--- a/tpl/report.tpl.js
+++ b/tpl/report.tpl.js
@@ -13,17 +13,20 @@
         benchmarks: [
             {% for benchmark in run.benchmarks %}{
                 name: '{$benchmark.name}',
-                mean: {$benchmark.mean},
-                stddev: {$benchmark.stddev},
+                {%if benchmark.data %}
+                mean: {$benchmark.data.mean},
+                stddev: {$benchmark.data.stddev},
                 samples: [
-                    {% for sample in benchmark.samples %}{$sample}, {% endfor %}
+                    {% for sample in benchmark.data.samples %}{$sample}, {% endfor %}
                 ],
+                {% endif %}
             },{% endfor %}
         ]
     },{% endfor %}
     ]
 };
-
+var origOrder = data.runs[0].benchmarks.map(function (_, i) { return i; })
+var sortOrder = computeSortedOrder();
 var plotdiv = document.getElementById("plot");
 window.addEventListener("resize", function() {
     Plotly.Plots.resize(plotdiv);
@@ -34,35 +37,72 @@
 chooser.addEventListener("blur", chooser.focus.bind(chooser));
 chooser.focus();
 
+var isSortedBox = document.getElementById("is-sorted");
+isSortedBox.addEventListener("change", choosePlot);
+
 var legendStyle = {
     font: { family: 'monospace' },
     borderwidth: 2,
     bordercolor: 'black'
 }
 
+function zeroes(count) {
+    var arr = []
+    while (count --> 0) arr.push(0)
+    return arr
+}
+
+function computeSortedOrder() {
+    // We sort each run. Then we compute the "points" of each
+    // benchmark as the sum of the positions of this benchmark on
+    // each run. This gives us a rough indication of which
+    // benchmark is best -- the lower the points, the better.
+    var runsOrder = data.runs.map(function (r) {
+        var order = r.benchmarks.map(function (_, i) { return i; })
+        order.sort(function (a, b) {
+            return r.benchmarks[a].mean - r.benchmarks[b].mean
+        })
+        return order
+    })
+    var length = data.runs[0].benchmarks.length
+    var points = runsOrder.reduce(function (acc, r) {
+        r.forEach(function (elem, idx) {
+            acc[elem] += idx
+        })
+        return acc
+    }, zeroes(length))
+    var order = data.runs[0].benchmarks.map(function (_, i) { return i; })
+    order.sort(function (a, b) {
+        return points[a] - points[b]
+    })
+    return order
+}
+
 function choosePlot() {
-    var plot = chooser.options[chooser.selectedIndex].value;
+    var plot = chooser.options[chooser.selectedIndex].value
+    var order = isSortedBox.checked ? sortOrder : origOrder
     if (plot == 'summary') {
         if (data.runs.length > 1) {
-            plotSummary();
+            plotSummary(order);
         } else {
-            plotSingleSummary();
+            plotSingleSummary(order);
         }
     } else {
-        plotSamples(plot);
+        plotSamples(plot, order);
     }
 }
 
-function plotSamples(plot) {
+function plotSamples(plot, order) {
     var run = data.runs[plot];
-    var traces = run.benchmarks.map(function (b, i) {
+    var traces = order.map(function (i) {
+        var b = run.benchmarks[i]
         return {
             name: b.name,
             type: 'scatter',
             mode: 'markers',
             marker: { symbol: i },
             y: b.samples,
-            x: b.samples.map(function (_, i) { return i; })
+            x: b.samples && b.samples.map(function (_, i) { return i; })
         }
     });
     var layout = {
@@ -79,10 +119,10 @@
     Plotly.newPlot(plotdiv, traces, layout);
 }
 
-function plotSummary() {
-    var traces = data.runs[0].benchmarks.map(function (b, i) {
+function plotSummary(order) {
+    var traces = order.map(function (i) {
         return {
-            name: b.name,
+            name: data.runs[0].benchmarks[i].name,
             type: 'scatter',
             marker: { symbol: i },
             x: data.runs.map(function (r) { return r.params[data.param]; }),
@@ -112,12 +152,13 @@
     Plotly.newPlot(plotdiv, traces, layout);
 }
 
-function plotSingleSummary() {
-    var traces = data.runs[0].benchmarks.map(function (b, i) {
+function plotSingleSummary(order) {
+    var traces = order.map(function (i) {
+        var b = data.runs[0].benchmarks[i]
         return {
             type: 'bar',
             name: b.name,
-            x: [ 0 ],
+            x: [ data.title ],
             y: [ b.mean ],
             error_y: {
                 type: 'data',
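Note: to make the ordering concrete, here is a worked call of the sorted_order() sketch given after the html_report_template diff above, with invented numbers:

    // Two runs, three benchmarks A, B, C; entries are per-run means.
    // Run 1 ranks them A,B,C (ranks 0,1,2); run 2 ranks them B,A,C (A:1, B:0, C:2).
    // Points: A = 0+1 = 1, B = 1+0 = 1, C = 2+2 = 4, so C always sorts last;
    // A and B tie on points, and their relative order is unspecified because
    // std::sort is not stable -- matching Array.prototype.sort in the
    // JavaScript above, which also guarantees nothing for equal keys.
    auto order = sorted_order({{1.0, 2.0, 3.0},
                               {2.0, 1.0, 9.0}});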