@@ -82,10 +82,10 @@ def test_forward_per_tensor(self, device, X):
             X, scale, zero_point, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))
-    @unittest.skip("temporarily disable the test")
     def test_backward_per_tensor(self, device, X):
         r"""Tests the backward method.
         """
@@ -105,11 +105,11 @@ def test_backward_per_tensor(self, device, X):
         Y_prime.backward(dout)
         np.testing.assert_allclose(dX.cpu(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
 
+    # https://github.com/pytorch/pytorch/issues/30604
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))
-    # https://github.com/pytorch/pytorch/issues/30604
-    @unittest.skip("temporarily disable the test")
     def test_numerical_consistency_per_tensor(self, device, X):
         r"""Comparing numerical consistency between CPU quantize/dequantize op and the CPU fake quantize op
         """
@@ -125,6 +125,7 @@ def test_numerical_consistency_per_tensor(self, device, X):
             X, scale, zero_point, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=[torch.quint8])),
@@ -246,10 +247,10 @@ def test_backward_per_channel(self, device, X):
         Y_prime.backward(dout)
         np.testing.assert_allclose(dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                    qparams=hu.qparams(dtypes=torch.quint8)))
-    @unittest.skip("temporarily disable the test")
     def test_numerical_consistency_per_channel(self, device, X):
         r"""Comparing numerical consistency between CPU quantize/dequantize op and the CPU fake quantize op
         """
@@ -267,6 +268,7 @@ def test_numerical_consistency_per_channel(self, device, X):
             X, scale, zero_point, axis, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
                                    qparams=hu.qparams(dtypes=torch.qint8)))
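
Context for the reordering above (not part of the diff itself): Python applies decorators bottom-up, so listing @unittest.skip above @given makes the skip the outermost wrapper; unittest then marks the test as skipped before Hypothesis ever generates examples. A minimal sketch of the pattern, with a hypothetical class and test name that are not taken from the PyTorch test suite:

# Hypothetical example, not from the PyTorch tests: shows the decorator
# ordering used in the diff, with @unittest.skip as the outermost decorator.
import unittest
from hypothesis import given, strategies as st

class ExampleTest(unittest.TestCase):
    # @unittest.skip is listed first, so it is applied last and wraps the
    # Hypothesis-decorated function; the runner skips it without calling it.
    @unittest.skip("temporarily disable the test")
    @given(x=st.integers())
    def test_something(self, x):
        self.assertIsInstance(x, int)

if __name__ == "__main__":
    unittest.main()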