diff --git a/Framework/include/QualityControl/Reductor.h b/Framework/include/QualityControl/Reductor.h index 042c367b33..180b12715d 100644 --- a/Framework/include/QualityControl/Reductor.h +++ b/Framework/include/QualityControl/Reductor.h @@ -16,12 +16,17 @@ #ifndef QUALITYCONTROL_REDUCTOR_H #define QUALITYCONTROL_REDUCTOR_H +#include "CustomParameters.h" + namespace o2::quality_control::postprocessing { /// \brief An interface for storing columnar data into a TTree class Reductor { + protected: + core::CustomParameters mCustomParameters; + public: /// \brief Constructor Reductor() = default; @@ -34,8 +39,10 @@ class Reductor /// \brief Branch leaf list getter /// \return A C string with a description of a branch format, formatted accordingly to the TTree interface virtual const char* getBranchLeafList() = 0; + /// \brief setter for mCustomParameters + void setCustomConfig(const core::CustomParameters& parameters) { mCustomParameters = parameters; }; }; } // namespace o2::quality_control::postprocessing -#endif //QUALITYCONTROL_REDUCTOR_H +#endif // QUALITYCONTROL_REDUCTOR_H diff --git a/Framework/include/QualityControl/TrendingTaskConfig.h b/Framework/include/QualityControl/TrendingTaskConfig.h index 1ad8adac9e..939de60b5d 100644 --- a/Framework/include/QualityControl/TrendingTaskConfig.h +++ b/Framework/include/QualityControl/TrendingTaskConfig.h @@ -19,6 +19,7 @@ #include #include +#include "CustomParameters.h" #include "QualityControl/PostProcessingConfig.h" namespace o2::quality_control::postprocessing @@ -56,6 +57,7 @@ struct TrendingTaskConfig : PostProcessingConfig { std::string path; std::string name; std::string reductorName; + core::CustomParameters reductorParameters; std::string moduleName; }; diff --git a/Framework/src/TrendingTask.cxx b/Framework/src/TrendingTask.cxx index 6752412bf4..db98dca175 100644 --- a/Framework/src/TrendingTask.cxx +++ b/Framework/src/TrendingTask.cxx @@ -48,7 +48,8 @@ void TrendingTask::configure(const 
boost::property_tree::ptree& config) // configuration mConfig = TrendingTaskConfig(getID(), config); for (const auto& source : mConfig.dataSources) { - mReductors.emplace(source.name, root_class_factory::create(source.moduleName, source.reductorName)); + auto&& [emplaced, _] = mReductors.emplace(source.name, root_class_factory::create(source.moduleName, source.reductorName)); + emplaced->second->setCustomConfig(source.reductorParameters); } } diff --git a/Framework/src/TrendingTaskConfig.cxx b/Framework/src/TrendingTaskConfig.cxx index 3528db29c0..04de31261b 100644 --- a/Framework/src/TrendingTaskConfig.cxx +++ b/Framework/src/TrendingTaskConfig.cxx @@ -58,6 +58,15 @@ TrendingTaskConfig::TrendingTaskConfig(std::string id, const boost::property_tre plotConfig.get("colorPalette", 0), graphs }); } + + const auto extractReductorParams = [](const boost::property_tree::ptree& dataSourceConfig) -> core::CustomParameters { + core::CustomParameters result; + if (const auto reductorParams = dataSourceConfig.get_child_optional("reductorParameters"); reductorParams.has_value()) { + result.populateCustomParameters(reductorParams.value()); + } + return result; + }; + for (const auto& dataSourceConfig : config.get_child("qc.postprocessing." 
+ id + ".dataSources")) { if (const auto& sourceNames = dataSourceConfig.second.get_child_optional("names"); sourceNames.has_value()) { for (const auto& sourceName : sourceNames.value()) { @@ -65,6 +74,7 @@ TrendingTaskConfig::TrendingTaskConfig(std::string id, const boost::property_tre dataSourceConfig.second.get("path"), sourceName.second.data(), dataSourceConfig.second.get("reductorName"), + extractReductorParams(dataSourceConfig.second), dataSourceConfig.second.get("moduleName") }); } } else if (!dataSourceConfig.second.get("name").empty()) { @@ -73,6 +83,7 @@ TrendingTaskConfig::TrendingTaskConfig(std::string id, const boost::property_tre dataSourceConfig.second.get("path"), dataSourceConfig.second.get("name"), dataSourceConfig.second.get("reductorName"), + extractReductorParams(dataSourceConfig.second), dataSourceConfig.second.get("moduleName") }); } else { throw std::runtime_error("No 'name' value or a 'names' vector in the path 'qc.postprocessing." + id + ".dataSources'"); diff --git a/Framework/test/testReductor.cxx b/Framework/test/testReductor.cxx index d76da0f38e..26b3e78243 100644 --- a/Framework/test/testReductor.cxx +++ b/Framework/test/testReductor.cxx @@ -14,6 +14,7 @@ /// \author Piotr Konopka /// +#include "QualityControl/CustomParameters.h" #include "QualityControl/Reductor.h" #include "QualityControl/ReductorTObject.h" #include "QualityControl/ReductorConditionAny.h" @@ -23,6 +24,7 @@ #include "QualityControl/ConditionAccess.h" #include #include +#include #define BOOST_TEST_MODULE Reductor test #define BOOST_TEST_MAIN @@ -172,4 +174,30 @@ BOOST_AUTO_TEST_CASE(test_ReductorAnyInterface) Double_t* integrals = tree->GetVal(0); BOOST_CHECK_EQUAL(integrals[0], secret.Length()); -} \ No newline at end of file +} + +BOOST_AUTO_TEST_CASE(test_ReductorConfigurable) +{ + class ReductorTest : public Reductor + { + public: + std::string value{}; + + virtual void* getBranchAddress() + { + return nullptr; + } + virtual const char* getBranchLeafList() + { + 
value = mCustomParameters.at("key"); + return value.c_str(); + } + }; + + ReductorTest reductor; + CustomParameters params; + params.set("key", "value"); + Reductor& r = reductor; + r.setCustomConfig(params); + BOOST_REQUIRE_EQUAL(r.getBranchLeafList(), "value"); +} diff --git a/Framework/test/testTrendingTask.cxx b/Framework/test/testTrendingTask.cxx index bbe1530041..de9cd3c64f 100644 --- a/Framework/test/testTrendingTask.cxx +++ b/Framework/test/testTrendingTask.cxx @@ -14,6 +14,7 @@ /// \author Piotr Konopka /// +#include "QualityControl/Reductor.h" #include "QualityControl/TrendingTask.h" #include "QualityControl/DatabaseFactory.h" #include "QualityControl/MonitorObject.h" @@ -51,6 +52,25 @@ struct CleanupAtDestruction { std::function mCallback = nullptr; }; +// https://stackoverflow.com/questions/424104/can-i-access-private-members-from-outside-the-class-without-using-friends +template +struct DeclareGlobalGet { + friend typename Accessor::type get(Accessor) { return Member; } +}; + +struct TrendingTaskReductorAccessor { + using type = std::unordered_map> TrendingTask::*; + friend type get(TrendingTaskReductorAccessor); +}; + +struct ReductorConfigAccessor { + using type = CustomParameters Reductor::*; + friend type get(ReductorConfigAccessor); +}; + +template struct DeclareGlobalGet; +template struct DeclareGlobalGet; + TEST_CASE("test_trending_task") { const std::string pid = std::to_string(getpid()); @@ -88,6 +108,13 @@ TEST_CASE("test_trending_task") taskName + R"json(", "name": "testHistoTrending", "reductorName": "o2::quality_control_modules::common::TH1Reductor", + "reductorParameters": { + "default": { + "default": { + "key":"value" + } + } + }, "moduleName": "QcCommon" }, { @@ -146,6 +173,20 @@ TEST_CASE("test_trending_task") task.setObjectsManager(objectManager); REQUIRE_NOTHROW(task.configure(config)); + { + auto& reductors = task.*get(TrendingTaskReductorAccessor()); + size_t foundCount{}; + for (const auto& reductor : reductors) 
{ + auto& config = (*reductor.second.get()).*get(ReductorConfigAccessor()); + if (auto found = config.find("key"); found != config.end()) { + if (found->second == "value") { + foundCount++; + } + } + } + REQUIRE(foundCount == 1); + } + // test initialize() REQUIRE_NOTHROW(task.initialize({ TriggerType::UserOrControl, true, { 0, "NONE", "", "", "qc" }, 1 }, services)); REQUIRE(objectManager->getNumberPublishedObjects() == 1); diff --git a/doc/PostProcessing.md b/doc/PostProcessing.md index 99e89ffb11..4d9d4d1842 100644 --- a/doc/PostProcessing.md +++ b/doc/PostProcessing.md @@ -4,65 +4,66 @@ * [Post-processing](#post-processing) - * [The post-processing framework](#the-post-processing-framework) - * [Post-processing interface](#post-processing-interface) - * [Configuration](#configuration) - * [Definition and access of user-specific configuration](#definition-and-access-of-user-specific-configuration) - * [Running it](#running-it) - * [Convenience classes](#convenience-classes) - * [The TrendingTask class](#the-trendingtask-class) - * [The SliceTrendingTask class](#the-slicetrendingtask-class) - * [The ReferenceComparatorTask class](#the-referencecomparatortask-class) - * [The CcdbInspectorTask class](#the-ccdbinspectortask-class) - * [The QualityTask class](#the-qualitytask-class) - * [The FlagCollectionTask class](#the-flagcollectiontask-class) - * [The BigScreen class](#the-bigscreen-class) - * [More examples](#more-examples) + * [The post-processing framework](#the-post-processing-framework) + * [Post-processing interface](#post-processing-interface) + * [Configuration](#configuration) + * [Definition and access of user-specific configuration](#definition-and-access-of-user-specific-configuration) + * [Running it](#running-it) + * [Convenience classes](#convenience-classes) + * [The TrendingTask class](#the-trendingtask-class) + * [The SliceTrendingTask class](#the-slicetrendingtask-class) + * [The ReferenceComparatorTask 
class](#the-referencecomparatortask-class) + * [The CcdbInspectorTask class](#the-ccdbinspectortask-class) + * [The QualityTask class](#the-qualitytask-class) + * [The FlagCollectionTask class](#the-flagcollectiontask-class) + * [The BigScreen class](#the-bigscreen-class) + * [More examples](#more-examples) - [← Go back to Modules Development](ModulesDevelopment.md) | [↑ Go to the Table of Content ↑](../README.md) | [Continue to Advanced Topics →](Advanced.md) - ## The post-processing framework This framework is intended for planned post-processing of objects generated by QC Tasks, Checks and correlating them with other data. The most common use-cases include correlation and trending of different properties of the detectors. - + The users can write their own Post-processing Tasks or use the ones provided by the framework (see [Convenience classes](#convenience-classes)) which are supposed to cover the usual needs. Post-processing Tasks run asynchronously to data-taking, but can be triggered by a set of selected events. ### Post-processing interface Any Post-processing Task should inherit PostProcessingInterface, which includes four methods: - * `configure` (optional) - provides the task with configuration. - * `initialize` - initializes the task and its data, given the event which it was triggered by. - * `update` - updates the task and its data, given the event which it was triggered by. - * `finalize` - finalizes the processing, given the event which it was triggered by. +* `configure` (optional) - provides the task with configuration. +* `initialize` - initializes the task and its data, given the event which it was triggered by. +* `update` - updates the task and its data, given the event which it was triggered by. +* `finalize` - finalizes the processing, given the event which it was triggered by. Interfaces to databases and other services are accesible via `ServiceRegistry`, which is an argument to the last three methods. 
They are invoked when any of the specified triggers is up, which can be: - * Start Of Run - triggers when receives SOSOR message from `aliecs.run` kafka topic which has **DIFFERENT** run number **AND** environment id than `Activity` class in config - * End Of Run - triggers when receives SOEOR message from `aliecs.run` kafka topic which has **DIFFERENT** run number **AND** environment id than `Activity` class in config - * Start Of Fill (SOF, not implemented yet) - * End Of Fill (EOF, not implemented yet) - * Periodic - triggers when a specified period of time passes - * New Object - triggers when an object in QCDB is updated - * For Each Object - triggers for each object in QCDB which matches an Activity - * For Each Latest - triggers for the latest object in QCDB for each matching Activity, sorted by Period, Pass and Run - * Once - triggers only first time it is checked - * Always - triggers each time it is checked + +* Start Of Run - triggers when receives SOSOR message from `aliecs.run` kafka topic which has **DIFFERENT** run number **AND** environment id than `Activity` class in config +* End Of Run - triggers when receives SOEOR message from `aliecs.run` kafka topic which has **DIFFERENT** run number **AND** environment id than `Activity` class in config +* Start Of Fill (SOF, not implemented yet) +* End Of Fill (EOF, not implemented yet) +* Periodic - triggers when a specified period of time passes +* New Object - triggers when an object in QCDB is updated +* For Each Object - triggers for each object in QCDB which matches an Activity +* For Each Latest - triggers for the latest object in QCDB for each matching Activity, sorted by Period, Pass and Run +* Once - triggers only first time it is checked +* Always - triggers each time it is checked Triggers are complemented with: -- timestamps which correspond the time when trigger started to be valid, in form of ms since epoch, just like in CCDB and QCDB, -- `last` flag, being `true` if it is the last time 
trigger will fire, -- `Activity` object, which contains metadata such as run type and number, pass name, period name, data provenance. + +* timestamps which correspond the time when trigger started to be valid, in form of ms since epoch, just like in CCDB and QCDB, +* `last` flag, being `true` if it is the last time trigger will fire, +* `Activity` object, which contains metadata such as run type and number, pass name, period name, data provenance. For example, the periodic trigger will provide evenly spaced timestamps, even if the trigger is checked more rarely. The New Object trigger provide the timestamp of the updated object. The timestamps and Activites should be used to access databases, so any Post-processing Task can be rerun at any time for any run and reconstruction pass. -The Activity specified at the top of the configuration file is used to for triggers to match objects which belong to +The Activity specified at the top of the configuration file is used to for triggers to match objects which belong to certain run, pass, period. A lack of a parameter or a default value are treated as a wildcard. Since AliECS overwrites the run number during initialization, one may force the run number wildcard by adding the following key-value pair: + ```json { "qc": { @@ -73,7 +74,6 @@ the run number during initialization, one may force the run number wildcard by a }, ``` - MonitorObjects may be saved by registering them in ObjectManager, similarly to normal QC Tasks (recommended, see examples linked below), or by using DatabaseInterface directly. Please note, that created objects have to registered in ObjectManager to make them accessible by Checks. @@ -118,11 +118,12 @@ This is a snippet of a JSON structure which configures a post-processing task: } } ``` + Each task is uniquely identified by its id (`MyPostProcessingTaskID`). One can activate it by setting the `"active"` field to `"true"`. 
The task is loaded given its full `"className"` and a `"moduleName"` where it is located. The `"detectorName"` might be used by tasks to store generated data in correct paths in QCDB. The `"initTrigger"`, `"updateTrigger"` and `"stopTrigger"` lists contain triggers which should invoke corresponding interface methods. The `"periodSeconds"` parameter in the common section defines how often should the triggers be checked. Values larger than 10 seconds should be applied when running synchronously to data taking, while very small periods can be used when processing batches of already existing objects. Checks can be applied to the results of Post-processing Tasks just as for normal QC Tasks. However, one should use data source type of `"PostProcessing"` instead of `"Task"`: - + ``` ... "checks": { @@ -146,29 +147,29 @@ Checks can be applied to the results of Post-processing Tasks just as for normal A postprocessing task can access custom parameters declared in the configuration file at `qc.postprocessing..extendedTaskParameters`. They are stored inside an object of type `CustomParameters` named `mCustomParameters`, which is a protected member of `TaskInterface`. -[More details](Advanced.md#definition-and-access-of-user-specific-configuration) can be found about this feature in the Tasks (same behaviour). +[More details](Advanced.md#definition-and-access-of-user-specific-configuration) can be found about this feature in the Tasks (same behaviour). #### Triggers configuration Each of the three methods can be invoked by one or more triggers. Below are listed the possible options (case insensitive). - * `"sor"` or `"startofrun"` - start of a **different** run (useful for long-running post-processing which observes many data taking runs). Please ensure that the configuration file includes a kafka broker. - * `"eor"` or `"endofrun"` - end of a **different** run (useful for long-running post-processing which observes many data taking runs). 
Please ensure that the configuration file includes a kafka broker. - * `"sof"` or `"startoffill"` - Start Of Fill (not implemented yet) - * `"eof"` or `"endoffill"` - End Of Fill (not implemented yet) - * `""` - Periodic - triggers when a specified period of time passes. For example: "5min", "0.001 seconds", "10sec", "2hours". - * `"newobject:[qcdb/ccdb]:"` - New Object - triggers when an object in QCDB or CCDB is updated (applicable for synchronous processing). For example: `"newobject:qcdb:qc/TST/MO/QcTask/Example"` - * `"foreachobject:[qcdb/ccdb]:"` - For Each Object - triggers for each object in QCDB or CCDB which matches the activity indicated in the QC config file (applicable for asynchronous processing). - * `"foreachlatest:[qcdb/ccdb]:"` - For Each Latest - triggers for the latest object version in QCDB or CCDB - for each matching activity (applicable for asynchronous processing). It sorts objects in ascending order by period, +* `"sor"` or `"startofrun"` - start of a **different** run (useful for long-running post-processing which observes many data taking runs). Please ensure that the configuration file includes a kafka broker. +* `"eor"` or `"endofrun"` - end of a **different** run (useful for long-running post-processing which observes many data taking runs). Please ensure that the configuration file includes a kafka broker. +* `"sof"` or `"startoffill"` - Start Of Fill (not implemented yet) +* `"eof"` or `"endoffill"` - End Of Fill (not implemented yet) +* `""` - Periodic - triggers when a specified period of time passes. For example: "5min", "0.001 seconds", "10sec", "2hours". +* `"newobject:[qcdb/ccdb]:"` - New Object - triggers when an object in QCDB or CCDB is updated (applicable for synchronous processing). 
For example: `"newobject:qcdb:qc/TST/MO/QcTask/Example"` +* `"foreachobject:[qcdb/ccdb]:"` - For Each Object - triggers for each object in QCDB or CCDB which matches the activity indicated in the QC config file (applicable for asynchronous processing). +* `"foreachlatest:[qcdb/ccdb]:"` - For Each Latest - triggers for the latest object version in QCDB or CCDB + for each matching activity (applicable for asynchronous processing). It sorts objects in ascending order by period, pass and run. - * `"once"` - Once - triggers only first time it is checked - * `"always"` - Always - triggers each time it is checked +* `"once"` - Once - triggers only first time it is checked +* `"always"` - Always - triggers each time it is checked -#### Using different databases +#### Using different databases It might happen that one wants to get data and store data in different databases. Typically if you want to test with -production data but store the object in test. +production data but store the object in test. This can be achieved by setting the extra parameter `sourceRepo` in the task. You have to add it to all your tasks as this is not a global parameter. It is optional. @@ -195,13 +196,16 @@ The post-processing tasks can be run in three ways. First uses the usual `o2-qc` it is the only one which allows to run checks over objects generated in post-processing tasks. This is will be one of two ways to run PP tasks in production. To try it out, use it like for any other QC configuration: + ``` o2-qc -b --config json://${QUALITYCONTROL_ROOT}/etc/postprocessing.json ``` + All declared and active tasks in the configuration file will be run in parallel. To change how often triggers are evaluated, modify the value for `qc.config.postprocessing.periodSeconds` in the config file. 
To run a different configuration which trends all the `qc/TST/MO/QcTask/example` objects existing in QCDB, try the following: + ``` o2-qc -b --config json://${QUALITYCONTROL_ROOT}/etc/postprocessing-async.json ``` @@ -228,22 +232,28 @@ To have more control over the state transitions or to run a standalone post-proc its state transitions and push the configuration. To try it out locally, run the following in the first terminal window (we will try out a different task this time): + ``` o2-qc-run-postprocessing-occ --name ExampleTrend --period 10 ``` -In the logs you will see a port number which listens for RPC commands. Remember it. + +In the logs you will see a port number which listens for RPC commands. Remember it. + ``` no control port configured, defaulting to 47100 no role configured, defaulting to default-role gRPC server listening on port 47100 ``` + In the second window, run the following. Use the port number from the output of the QC executable. + ``` # If you haven't built it: # aliBuild build Coconut --defaults o2-dataflow alienv enter coconut/latest OCC_CONTROL_PORT=47100 peanut ``` + A simple terminal user interface will open, which will allow you to trigger state transitions. Use it to load the configuration by entering the path to the configuration file. The usual transition sequence, which you might want to try out, is CONFIGURE, START, STOP, RESET, EXIT. ## Convenience classes @@ -255,7 +265,7 @@ We aim to provide some convenience classes which should cover the most common po `TrendingTask` is a post-processing task which uses a TTree to trend objects in the QC database and produce basic plots. The [Post-processing example](QuickStart.md#post-processing-example) in the QuickStart showcases the possibilities of this class. The following scheme shows how the class is designed. 
-It can access **data sources** which are Monitor Objects and Quality Objects from the QCDB, as well as custom +It can access **data sources** which are Monitor Objects and Quality Objects from the QCDB, as well as custom objects from the CCDB. The objects' characteristics which should be tracked are extracted by **Reductors** - simple plugins. @@ -267,14 +277,14 @@ Each data source forms a separate branch, with its leaves being the individual v Additionally added columns include a `time` branch and a `metadata` branch (now consisting only of `runNumber`). The TTree is stored back to the **QC database** each time it is updated. -In addition, the class exposes the [`TTree::Draw`](https://root.cern/doc/master/classTTree.html#a73450649dc6e54b5b94516c468523e45) interface, which allows to instantaneously generate **plots** with trends, correlations or histograms that are also sent to the QC database. +In addition, the class exposes the [`TTree::Draw`](https://root.cern/doc/master/classTTree.html#a73450649dc6e54b5b94516c468523e45) interface, which allows to instantaneously generate **plots** with trends, correlations or histograms that are also sent to the QC database. Multiple graphs can be drawn on one plot, if needed. ![TrendingTask](images/trending-task.png) #### Configuration -As this class is a post-processing task, it inherits also its configuration JSON template. It extends it, though, +As this class is a post-processing task, it inherits also its configuration JSON template. It extends it, though, some additional parameters. ``` json @@ -302,12 +312,15 @@ some additional parameters. ``` Data sources are defined by filling the corresponding structure, as in the example below. For the key `"type"` use: -- `"repository"` if you access a Monitor Object. -- `"repository-quality"` if that should be a Quality Object. -- `"condition"` for trending an object of any type in CCDB. + +* `"repository"` if you access a Monitor Object. 
+* `"repository-quality"` if that should be a Quality Object. +* `"condition"` for trending an object of any type in CCDB. The `"names"` array should point to one or more objects under a common `"path"` in the repository. The values of `"reductorName"` and `"moduleName"` should point to a full name of a data Reductor and a library where it is located. One can use the Reductors available in the `Common` module or write their own by inheriting the interface class. +Field `"reductorParameters"` is used to configure Reductors (classes inherited from `"o2::quality_control::postprocessing::Reductor"` interface). It uses same format as `"extendedTaskParameters"` field. + ``` json { ... @@ -331,6 +344,14 @@ The `"names"` array should point to one or more objects under a common `"path"` "path": "GRP/Calib", "names": [ "LHCClockPhase" ], "reductorName": "o2::quality_control_modules::common::LHCClockPhaseReductor", + "reductorParameters": { + "default": { + "default": { + "key":"value" + } + } + }, + + "moduleName": "QcCommon" } ], @@ -345,6 +366,7 @@ If there is more than one graph drawn on a plot, a legend will be automatically Optionally, one can use `"graphError"` to add x and y error bars to a graph, as in the first plot example. The `"name"` and `"varexp"` are the only compulsory arguments, others can be omitted to reduce configuration files size. `"graphAxisLabel"` allows the user to set axis labels in the form of `"Label Y axis: Label X axis"`. With `"graphYRange"` numerical values for fixed ranges of the y axis can be provided in the form of `"Min:Max"`. + ``` json { ... @@ -385,7 +407,7 @@ The `"name"` and `"varexp"` are the only compulsory arguments, others can be omi } ``` -To decide whether plots should be generated during each update or just during finalization, +To decide whether plots should be generated during each update or just during finalization, 
To pick up the last existing trend which matches the specified Activity, set `"resumeTrend"` to `"true"`. @@ -396,9 +418,11 @@ To generate plots only when all input objects are available, set `"trendIfAllInp The available options are `"trigger"` (timestamp provided by the trigger), `"validFrom"` (validity start in activity provided by the trigger), `"validUntil"` (validity end in activity provided by the trigger, default). ### The SliceTrendingTask class + The `SliceTrendingTask` is a complementary task to the standard `TrendingTask`. This task allows the trending of canvas objects that hold multiple histograms (which have to be of the same dimension, e.g. TH1) and the slicing of histograms. The latter option allows the user to divide a histogram into multiple subsections along one or two dimensions which are trended in parallel to each other. The task has specific reductors for `TH1` and `TH2` objects which are `o2::quality_control_modules::common::TH1SliceReductor` and `o2::quality_control_modules::common::TH2SliceReductor`. #### Configuration + Similar to the `TrendingTask`, the configuration of the `SliceTrendingTask` is divided into `"dataSources"` and `"plots"`, where both parts have been extended in respect to the standard trending. Here, only changes in respect to the standard trending task are highlighted. The data sources are extended by `"axisDivision"` which configures the slicing of the histograms. The inner most brackets relate the the actual axis. Its configuration can be understood as `"axisDivision": [ [x-Axis], [y-Axis] ]` where `[y-Axis]` does not need to be provided in case of one-dimensional objects. The values provided in `[x(y)-Axis]` are the numerical boundaries of the x(y)-axis. For *n* slices, one thus needs to provide *n*+1 values in ascending order. Protections are added such that each bin is part of only one slice. If the outer brackets are left empty (i.e. 
`"axisDivision": [ ]`), no slicing is applied and the whole histogram is trended as in the standard trending task. @@ -419,25 +443,29 @@ The data sources are extended by `"axisDivision"` which configures the slicing o ... } ``` + The `"plot"` configuration has changed in respect to the standard trending task as follows: The `"varexp"` selection is still set up as `"Histogram.Var:TrendingType"` where `"Histogram.Var"` is trended vs `"TrendingType"`. The options for `"Var"`are: -- `"entries"`: Number of entries of the slice -- `"meanX"`: Mean along the x-axis of the slice -- `"stddevX"`: Stddev along the x-axis of the slice -- `"errMeanX"`: Error of the mean along the x-axis of the slice -- `"meanY"`: Mean along the y-axis of the slice. -- `"stddevY"`: Stddev along the y-axis of the slice -- `"errMeanY"`: Error of the mean along the y-axis of the slice + +* `"entries"`: Number of entries of the slice +* `"meanX"`: Mean along the x-axis of the slice +* `"stddevX"`: Stddev along the x-axis of the slice +* `"errMeanX"`: Error of the mean along the x-axis of the slice +* `"meanY"`: Mean along the y-axis of the slice. +* `"stddevY"`: Stddev along the y-axis of the slice +* `"errMeanY"`: Error of the mean along the y-axis of the slice In case of 1 dimensional objects, `"meanY"` is calculated as the arithmetic mean of all the bin values in the slice. The respective `"stddevY"` and `"errMeanY"` are provided as well. The options for `"TrendingType"` are limited to: -- `"time"`: The quantity `"Histogram.Var"` of all slices is trended as a function of time. Each slice-trending has its own graph which are all published on one canvas. -- `"multigraphtime"`: The quantity `"Histogram.Var"` of all slices is trended as a function of time. All slice-trendings are published on one `"TMultiGraph"`. A legend is provided which contains the numerical boundaries of the slices. 
-- `"slices"`: The quantity `"Histogram.Var"` of all slices is trended as a function of the geometrical center of the slices. Always the latest timestamp is plotted. -- `"slices2D"`: The quantity `"Histogram.Var"` of all slices is trended as a function of the geometrical center of the slices in two dimensions. Always the latest timestamp is plotted. Errors (if used) are stored per bin but are not visualized. + +* `"time"`: The quantity `"Histogram.Var"` of all slices is trended as a function of time. Each slice-trending has its own graph which are all published on one canvas. +* `"multigraphtime"`: The quantity `"Histogram.Var"` of all slices is trended as a function of time. All slice-trendings are published on one `"TMultiGraph"`. A legend is provided which contains the numerical boundaries of the slices. +* `"slices"`: The quantity `"Histogram.Var"` of all slices is trended as a function of the geometrical center of the slices. Always the latest timestamp is plotted. +* `"slices2D"`: The quantity `"Histogram.Var"` of all slices is trended as a function of the geometrical center of the slices in two dimensions. Always the latest timestamp is plotted. Errors (if used) are stored per bin but are not visualized. The field `"graphErrors"` is set up as `"graphErrors":"Var1:Var2"` where `Var1` is the error along y and `Var2` the error along x. For `Var1(2)` numerical values or the options listed for `Var` above can be set. The original histogram does not need to be provided as the task will take the histogram specified in `"varexp": "Histogram.Var:TrendingType"`. In `"graphYRange"` and `"graphXRange"` numerical values for fixed ranges of the x and y axis can be provided in the form of `"Min:Max"`. If provided, the task will set all x (or y) axis on the canvas to this range. `"graphAxisLabel"` allows the user to set axis labels in the form of `"Label Y axis: Label X axis"`. + ``` { ... 
@@ -492,6 +520,7 @@ The `ignorePeriodForReference` and `ignorePassForReference` boolean parameters c A value of `"1"` (default) means that the reference plots are not required to match the period and/or pass names of the current run, while a value of `"0"` means that the reference plot is retrieved only if the corresponding period and/or pass names match those of the current run. The input MonitorObjects to be processed are logically divided in **dataGroups**. Each group is configured via the following parameters: + * `inputPath`: path in the QCDB where the input objects are located * `referencePath` (optional): specifies the path for the reference objects, if not set the `inputPath` is used * `outputPath`: path in the QCDB where the output objects are stored @@ -506,6 +535,7 @@ The `normalizeReference` boolean parameter controls wether the reference histogr The checker extracts the current and reference plots from the stored MO, and compares them using external modules, specified via the `moduleName` and `comparatorName` parameters. The `threshold` parameter specifies the value used to discriminate between good and bad matches between the histograms. Four comparison modules are provided in the framework: + 1. 
`o2::quality_control_modules::common::ObjectComparatorDeviation`: comparison based on the average relative deviation between the bins of the current and reference histograms; the module accepts the following configuration parameters: * `threshold`: the maximum allowed average relative deviation between current and reference histograms * `rangeX`, `rangeY`: if set, the comparison is restricted to the bins in the specified X and Y ranges; bins outside the ranges are ignored @@ -539,14 +569,15 @@ The following example specifies a threshold value common to all the plots, and t #### Full configuration example In the example configuration below, the relationship between the input and output histograms is the following: + * `MCH/MO/Tracks/WithCuts/TrackEta` (1-D histogram) - * `MCH/MO/RefComp/TracksMCH/WithCuts/TrackEta` - * 1-D version, current and reference plots drawn superimposed in the same canvas, with the ratio below - * comparison with a chi2 test method + * `MCH/MO/RefComp/TracksMCH/WithCuts/TrackEta` + * 1-D version, current and reference plots drawn superimposed in the same canvas, with the ratio below + * comparison with a chi2 test method * `MCH/MO/Tracks/WithCuts/TrackEtaPhi` (2-D histogram) - * `MCH/MO/RefComp/TracksMCH/WithCuts/TrackEtaPhi` - * 2-D version, ratio between plots drawn on top with the current and reference plots drawn smaller at the bottom - * comparison with a chi2 test method (`"comparatorName" : "o2::quality_control_modules::common::ObjectComparatorChi2`) + * `MCH/MO/RefComp/TracksMCH/WithCuts/TrackEtaPhi` + * 2-D version, ratio between plots drawn on top with the current and reference plots drawn smaller at the bottom + * comparison with a chi2 test method (`"comparatorName" : "o2::quality_control_modules::common::ObjectComparatorChi2"`) ```json { "qc": { @@ -636,7 +667,6 @@ In the example configuration below, the relationship between the input and outpu } ``` - ### The CcdbInspectorTask class A post-processing task that checks the existence, time 
stamp and validity of CCDB/QCDB objects. @@ -647,17 +677,19 @@ A `CcdbInspectorCheck` task receives the 2-D histogram produced by the CCDB insp #### Configuration The input objects are specified in the `DataSources` section. Each object is identified by the following parameters: + * `name`: the name of the object, which is used to label the X-axis bins of the output histogram * `path`: the path of the object in the database * `updatePolicy`: the policy with which the object is updated. Possible values are: - - `atSOR`: the object is only created once after start-of-run - - `atEOR`: the object is only created once at end-of-run - - `periodic`: the object is created periodically during the run + * `atSOR`: the object is only created once after start-of-run + * `atEOR`: the object is only created once at end-of-run + * `periodic`: the object is created periodically during the run * `cycleDuration`: for periodic objects, the time interval between updates * `validatorName`: (optional) name of the software module used to validate the contents of the object * `moduleName`: library where the validator module is located The task accepts the following configuration parameters: + * `timeStampTolerance`: tolerance (in seconds) applied when comparing the actual and expected object time stamp * `databaseType`: type of input database. Possible values are `ccdb` or `qcdb` (default: `ccdb`) * `databaseUrl`: address of the database (default: `https://alice-ccdb.cern.ch`) @@ -754,7 +786,8 @@ Hence, the trending and 1-D distribution can be used to estimate the fraction of ![QualityTask](images/quality-task.png) #### Configuration -The QualityObjects to be monitored and displayed are passed as **qualityGroups**, each containing **inputObjects** for a specific, line-separated group. + +The QualityObjects to be monitored and displayed are passed as **qualityGroups**, each containing **inputObjects** for a specific, line-separated group. 
Each group requires a **name** and **path** to the contained objects. A **title** can be added optionally, which appears at the top of the group in the canvas. By listing certain qualities in **ignoreQualitiesDetails** one can ask to ignore Flags associated to QualityObjects. @@ -764,9 +797,10 @@ A **title** can be added, which is used in the summary canvas to denote given Qu If it is absent, **name** is used instead. Optionally, one can add **messageBad**, **messageMedium**, **messageGood**, **messageNull** to add a message when a particular Quality is seen. -At each update, the task retrieves the latest version of each input QualityObject, even if their validity range ends in the past. A task configuration parameter, called `maxObjectAgeSeconds`, allows to define the maximum allowed age (in seconds) of the retrieved objects. The age is defined as the difference between the the time stamp of the task update and the creation time stamp of the retrieved object. +At each update, the task retrieves the latest version of each input QualityObject, even if their validity range ends in the past. A task configuration parameter, called `maxObjectAgeSeconds`, allows to define the maximum allowed age (in seconds) of the retrieved objects. The age is defined as the difference between the time stamp of the task update and the creation time stamp of the retrieved object. Here is a complete example of `QualityTask` configuration: + ```json { "qc": { @@ -851,6 +885,7 @@ o2-qc-run-postprocessing --config json://${QUALITYCONTROL_ROOT}/Modules/Common/e ``` The task is configured as follows: + ```json { "qc": { @@ -893,6 +928,7 @@ In addition, the boxes are filled with a grey color is the corresponding Quality The color of the canvas background and of the detector labels can be customized with the `"foregroundColor"` and `"backgroundColor"` parameters. 
They accept integer values corresponding to the indexes of the [default ROOT colors](https://root.cern.ch/doc/master/classTColor.html#C01) or the indexes defined in the [color wheel](https://root.cern.ch/doc/master/classTColor.html#C02). The example below shows a color combination with white text over a dark gray background. The task is configured as follows: + ```json { "qc": { @@ -969,9 +1005,11 @@ The following options allow to configure the appearance and behavior of the task * `labels`: comma-separated list of labels with boxes to be displayed in the canvas. Some places in the grid of boxes can be left empty by inserting two consecutive commas in the list, like between `TRD` and `TRK` in the example above The names in the data sources are composed of two parts, separated by a colon: + ``` LABEL:OBJECT_PATH ``` + The `LABEL` should match one of the elements of the `labels` parameter. The quality object will be associated to the corresponding box. ## More examples @@ -981,12 +1019,14 @@ This section contains examples of how to approach usual use-cases. ### I want to run postprocessing alongside of synchronous QC and trend some QC object parameters Use NewObject as the update trigger: + ```json "updateTrigger": [ "newobject:qcdb:TST/MO/QcTask/example" ], ``` -If the post-processing runs in a different AliECS environment than the acquisition run, one should add the following +If the post-processing runs in a different AliECS environment than the acquisition run, one should add the following flag. Since AliECS adds a concrete run number to the workflow, the triggers would match only objects from the same run. + ```json "qc": { "config": { @@ -1000,17 +1040,20 @@ flag. 
Since AliECS adds a concrete run number to the workflow, the triggers woul ### I want to run postprocessing synchronously and trend an object in CCDB Use Periodic or NewObject (but with `ccdb` as the DB) as the update trigger: + ```json "updateTrigger": [ "newobject:ccdb:TPC/Calib/IDC_0_A" ], ``` + or + ```json "updateTrigger": [ "60s" ], ``` In your trending task, make sure to retrieve the object from the CCDB, not QCDB. -If you want to keep the task running regardless of the data-taking activity, please contact the QC developers to set +If you want to keep the task running regardless of the data-taking activity, please contact the QC developers to set up a long-running workflow in Nomad. ### I want to trend a moving window in a synchronous QC @@ -1019,12 +1062,14 @@ In your QC task, enable the moving window feature on the selected plot. More details can be found in [Advanced/Moving window](Advanced.md#moving-window). Use the NewObject trigger on the moving window to update the task: + ```json "updateTrigger": [ "newobject:qcdb:TST/MO/QcTask/mw/example" ], ``` In your postprocessing task, retrieve the object you want to trend. For TrendingTask, it would be: + ```json "dataSources": [ { @@ -1040,13 +1085,16 @@ For TrendingTask, it would be: ### I want to run postprocessing on all already existing objects for a run Use ForEachObject as the update trigger: + ```json "updateTrigger": [ "foreachobject:qcdb:TST/MO/QcTask/example" ], ``` -Since objects are usually published in collections at the same time, you can use a path for one object to be triggered + +Since objects are usually published in collections at the same time, you can use a path for one object to be triggered for a collection of them (all objects produced by a QC Task). 
Use the Activity which matches the run, and (optionally) period and pass name: + ```json "Activity": { "number": "3212", @@ -1063,10 +1111,13 @@ Use the Activity which matches the run, and (optionally) period and pass name: ### I want to run postprocessing for all objects in all the runs of a given reconstruction pass and period Use ForEachObject as the update trigger: + ```json "updateTrigger": [ "foreachobject:qcdb:TST/MO/QcTask/example" ], ``` + Use the Activity which leaves the run number empty, but indicate the pass and period names. + ```json "Activity": { "number": "", @@ -1083,11 +1134,14 @@ Use the Activity which leaves the run number empty, but indicate the pass and pe ### I want to run postprocessing for all objects in all the runs of a given reconstruction pass and period which are valid in given time interval Use ForEachObject as the update trigger: + ```json "updateTrigger": [ "foreachobject:qcdb:TST/MO/QcTask/example" ], ``` + Use the Activity which leaves the run number empty, but indicate the pass and period names. Add `start` and `end` values in ms since epoch to restrict the validity start of objects. + ```json "Activity": { "number": "", @@ -1106,12 +1160,15 @@ Add `start` and `end` values in ms since epoch to restrict the validity start of ### I want to run postprocessing for the latest object for each available run in a given pass and period Use ForEachObject as the update trigger: + ```json "updateTrigger": [ "foreachlatest:qcdb:TST/MO/QcTask/example" ], ``` + This way you will avoid iterating on potential duplicates and intermediate objects, and get only the final versions instead. Use the Activity which leaves the run number empty, but indicate the pass and period names. + ```json "Activity": { "number": "",