
multivariate anomaly detection #43

@vinaaaaay

Description


I am using TSPulse to detect anomalies (and eventually plan to incorporate the same in the AgentOps competition).

anomaly_model = TSPulseForReconstruction.from_pretrained(
    "ibm-granite/granite-timeseries-tspulse-r1",
    num_input_channels=1,
    revision="main",
    mask_type="user",
)

pipeline = TimeSeriesAnomalyDetectionPipeline(
    anomaly_model,
    timestamp_column="agg_timestamp",
    target_columns=[y2_tag, y3_tag],
    prediction_mode=[
        AnomalyScoreMethods.FREQUENCY_RECONSTRUCTION.value,
        AnomalyScoreMethods.PREDICTIVE.value,
    ],
    aggregation_length=32,
    aggr_function="max",
    smoothing_length=1,  # no smoothing; we're looking for one-off events
    least_significant_scale=0.01,
    least_significant_score=0.1,
)

result = pipeline(filtered_df, batch_size=32, predictive_score_smoothing=False)

The multivariate targets are specified in the statement target_columns=[y2_tag, y3_tag].

Both y2_tag and y3_tag on their own (as univariate data) produce results, with no error during execution.
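
Since the univariate runs succeed, the fallback I am considering for now is to run one pipeline per target column and keep the per-column score series side by side. This is only a sketch built from the setup above; how best to merge the two score series is still open on my side.

# Sketch: one univariate pipeline per target column, reusing the setup above.
# Merging/aggregating the two score series is intentionally left open here.
per_column_results = {}
for col in [y2_tag, y3_tag]:
    univariate_pipeline = TimeSeriesAnomalyDetectionPipeline(
        anomaly_model,
        timestamp_column="agg_timestamp",
        target_columns=[col],  # one channel at a time, as in the working runs
        prediction_mode=[
            AnomalyScoreMethods.FREQUENCY_RECONSTRUCTION.value,
            AnomalyScoreMethods.PREDICTIVE.value,
        ],
        aggregation_length=32,
        aggr_function="max",
        smoothing_length=1,
        least_significant_scale=0.01,
        least_significant_score=0.1,
    )
    per_column_results[col] = univariate_pipeline(
        filtered_df, batch_size=32, predictive_score_smoothing=False
    )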

When I run them together as a multivariate model, I get the following error. I am trying to find a working example or settings for how to set up the multivariate pipeline. Thank you.
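
One configuration detail that might be related (purely a guess on my side, not confirmed against the docs): the model above is loaded with num_input_channels=1 while the pipeline receives two target columns. A variant that matches the channel count to the targets would look like this, if the checkpoint supports it:

# Hypothetical variant (unverified): load with a channel count matching the targets.
anomaly_model_2ch = TSPulseForReconstruction.from_pretrained(
    "ibm-granite/granite-timeseries-tspulse-r1",
    num_input_channels=2,  # len([y2_tag, y3_tag])
    revision="main",
    mask_type="user",
)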

Error during multivariate execution:

Summary from the bottom of the traceback: RuntimeError: The size of tensor a (256) must match the size of tensor b (128) at non-singleton dimension 0

/usr/local/lib/python3.12/dist-packages/transformers/pipelines/base.py in __call__(self, inputs, num_workers, batch_size, *args, **kwargs)
1465 )
1466 else:
-> 1467 return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
1468
1469 def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params):

/usr/local/lib/python3.12/dist-packages/tsfm_public/toolkit/time_series_anomaly_detection_pipeline.py in run_single(self, inputs, preprocess_params, forward_params, postprocess_params)
320 with torch.no_grad(): # check if really needed
321 while (batch := next(it, None)) is not None:
--> 322 scores = self.forward(batch, **forward_params)
323 for key in scores:
324 accumulator[key].append(scores[key])

/usr/local/lib/python3.12/dist-packages/transformers/pipelines/base.py in forward(self, model_inputs, **forward_params)
1372 with inference_context():
1373 model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
-> 1374 model_outputs = self._forward(model_inputs, **forward_params)
1375 model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu"))
1376 else:

/usr/local/lib/python3.12/dist-packages/tsfm_public/toolkit/time_series_anomaly_detection_pipeline.py in _forward(self, input_tensors, **kwargs)
339 original input keys.
340 """
--> 341 return self._model_processor.compute_score(input_tensors, **kwargs)
342
343 def postprocess(self, model_outputs, **postprocess_parameters):

/usr/local/lib/python3.12/dist-packages/tsfm_public/models/tspulse/utils/ad_helpers.py in compute_score(self, payload, expand_score, **kwargs)
130 if use_forecast:
131 # model_forward_output = self._model(**payload)
--> 132 model_forward_output = self._model(batch_x)
133
134 stitched_dict = {}

/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py in _wrapped_call_impl(self, *args, **kwargs)
1771 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1772 else:
-> 1773 return self._call_impl(*args, **kwargs)
1774
1775 # torchrec tests the code consistency with the following code

/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
1782 or _global_backward_pre_hooks or _global_backward_hooks
1783 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1784 return forward_call(*args, **kwargs)
1785
1786 result = None

/usr/local/lib/python3.12/dist-packages/tsfm_public/models/tspulse/modeling_tspulse.py in forward(self, past_values, future_values, past_observed_mask, output_hidden_states, return_loss, return_dict)
2138 scale = model_output.scale
2139
-> 2140 decoder_with_head_output = self.decoder_with_head(
2141 decoder_input=decoder_input,
2142 loc=loc,

/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py in _wrapped_call_impl(self, *args, **kwargs)
1771 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1772 else:
-> 1773 return self._call_impl(*args, **kwargs)
1774
1775 # torchrec tests the code consistency with the following code

/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
1782 or _global_backward_pre_hooks or _global_backward_hooks
1783 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1784 return forward_call(*args, **kwargs)
1785
1786 result = None

/usr/local/lib/python3.12/dist-packages/tsfm_public/models/tspulse/modeling_tspulse.py in forward(self, decoder_input, loc, scale, output_hidden_states, return_dict, fft_base_component, fft_real_max, fft_imag_max)
3312 half_pos = reconstruction_fft_outputs.shape[1] // 2
3313
-> 3314 real_part = reconstruction_fft_outputs[:, :half_pos, :] * fft_real_max
3315 imag_part = reconstruction_fft_outputs[:, half_pos:, :] * fft_imag_max
3316

RuntimeError: The size of tensor a (256) must match the size of tensor b (128) at non-singleton dimension 0
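
For what it is worth, the failing multiplication broadcasts over dimension 0, and 256 is exactly 2 × 128, which matches the two target columns. A standalone PyTorch snippet with assumed shapes (not taken from the library source) reproduces the same error message:

import torch

# Assumed shapes only: 128 windows per batch, with the 2 channels flattened into dim 0
# of the reconstruction output, while the FFT scaling tensor keeps the per-window batch size.
reconstruction_fft_outputs = torch.randn(256, 64, 96)  # (batch * channels, fft_len, features)
fft_real_max = torch.randn(128, 1, 1)                  # (batch, 1, 1)

half_pos = reconstruction_fft_outputs.shape[1] // 2
real_part = reconstruction_fft_outputs[:, :half_pos, :] * fft_real_max
# -> RuntimeError: The size of tensor a (256) must match the size of tensor b (128)
#    at non-singleton dimension 0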
