diff --git a/tests/decisiontree/test_estimators.py b/tests/decisiontree/test_estimators.py
index 20092c7..9f6c185 100644
--- a/tests/decisiontree/test_estimators.py
+++ b/tests/decisiontree/test_estimators.py
@@ -30,16 +30,12 @@ def test_growth_params_(self):
     )
 
     def test_fit(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.fit(None, None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("DecisionTreeTemplate.fit expectedly refused call")
 
     def test_predict(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.predict(None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("DecisionTreeTemplate.predict expectedly refused call")
 
     def test_select_samples_and_features_no_sampling(self):
         self.model.frac_features = 1.0
diff --git a/tests/decisiontree/test_node.py b/tests/decisiontree/test_node.py
index 9203810..6d92468 100644
--- a/tests/decisiontree/test_node.py
+++ b/tests/decisiontree/test_node.py
@@ -64,7 +64,7 @@ def test_Node(int_val, float_val, node_val, str_val, bool_val):
         if is_okay:
             raise ex
         else:
-            pytest.xfail("SplitScore validation failed as expected")
+            pass  # SplitScore validation failed as expected
     else:
         for att in [
             "array_column",
diff --git a/tests/decisiontree/test_split.py b/tests/decisiontree/test_split.py
index 266b5de..6c2653a 100644
--- a/tests/decisiontree/test_split.py
+++ b/tests/decisiontree/test_split.py
@@ -77,7 +77,7 @@ def test_BestSplit(score, column, threshold, target_groups, default_is_left):
         if is_okay:
             raise ex
         else:
-            pytest.xfail("BestSplit validation failed as expected")
+            pass  # BestSplit validation failed as expected
     else:
         if is_bad:
             pytest.fail(
@@ -472,7 +472,7 @@ def test_1d(
         )
     except ValueError as ex:
         if is_homogenous:
-            pytest.xfail("Splitting a homogneous y failed as expected")
+            pass  # Splitting a homogeneous y failed as expected
         else:
             raise ex
     else:
@@ -524,7 +524,7 @@ def test_1d_missing(
         )
     except ValueError as ex:
         if is_homogenous:
-            pytest.xfail("Splitting a homogneous y failed as expected")
+            pass  # Splitting a homogeneous y failed as expected
         else:
             raise ex
     else:
@@ -576,7 +576,7 @@ def test_2d(
         )
     except ValueError as ex:
         if is_homogenous:
-            pytest.xfail("Splitting a homogneous y failed as expected")
+            pass  # Splitting a homogeneous y failed as expected
         else:
             raise ex
     else:
@@ -629,7 +629,7 @@ def test_2d_missing(
         )
     except ValueError as ex:
         if is_homogenous:
-            pytest.xfail("Splitting a homogneous y failed as expected")
+            pass  # Splitting a homogeneous y failed as expected
         else:
             raise ex
     else:
diff --git a/tests/decisiontree/test_split_objects.py b/tests/decisiontree/test_split_objects.py
index e6942f9..ee0f712 100644
--- a/tests/decisiontree/test_split_objects.py
+++ b/tests/decisiontree/test_split_objects.py
@@ -26,7 +26,7 @@ def test_SplitScore(name, value):
         if is_okay:
             raise ValueError(f"whoops {name=} {value=} failed with {ex}")
         else:
-            pytest.xfail("SplitScore validation failed as expected")
+            pass  # SplitScore validation failed as expected
     else:
         if is_bad:
             pytest.fail(
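Note on the try/except/else pattern retained above: the same expectation can be stated more compactly by parametrizing the expected outcome and letting `pytest.raises` handle the error branch. A minimal self-contained sketch, with `split_or_raise` as a hypothetical stand-in for the split routine under test (not the module's actual API):

```python
import contextlib

import numpy as np
import pytest


def split_or_raise(y: np.ndarray) -> float:
    """Stand-in for the split routine: refuses a homogeneous y."""
    if np.unique(y).size < 2:
        raise ValueError("cannot split a homogeneous y")
    return float(y.mean())


@pytest.mark.parametrize(
    "y,should_raise",
    [
        (np.array([1.0, 1.0, 1.0]), True),  # homogeneous -> ValueError
        (np.array([0.0, 1.0, 1.0]), False),  # heterogeneous -> succeeds
    ],
)
def test_split_homogeneous_y(y: np.ndarray, should_raise: bool):
    # pytest.raises asserts the error branch; nullcontext covers the happy path.
    ctx = pytest.raises(ValueError) if should_raise else contextlib.nullcontext()
    with ctx:
        split_or_raise(y)
```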
diff --git a/tests/test_extratrees.py b/tests/test_extratrees.py
index ed82edf..f1c2953 100644
--- a/tests/test_extratrees.py
+++ b/tests/test_extratrees.py
@@ -15,16 +15,12 @@ def test_tree_(self):
         assert not hasattr(self.model, "trees_")
 
     def test_fit(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.fit(None, None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("ExtraTreesTemplate.fit expectedly refused call")
 
     def test_predict(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.predict(None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("ExtraTreesTemplate.predict expectedly refused call")
 
 
 class TestExtraTreesRegressor:
diff --git a/tests/test_gradientboostedtrees.py b/tests/test_gradientboostedtrees.py
index e3882da..e2dbede 100644
--- a/tests/test_gradientboostedtrees.py
+++ b/tests/test_gradientboostedtrees.py
@@ -14,16 +14,12 @@ def test_tree_(self):
         assert not hasattr(self.model, "trees_")
 
     def test_fit(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.fit(None, None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("DecisionTreeTemplate.fit expectedly refused call")
 
     def test_predict(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.predict(None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("DecisionTreeTemplate.predict expectedly refused call")
 
 
 class TestGradientBoostedTreesRegressor:
@@ -112,7 +108,7 @@ def test_bool_to_float(x, exp, is_bad: bool):
         res = gbt.bool_to_float(x)
     except ValueError as ex:
         if is_bad:
-            pytest.xfail("Failed expectedly to convert non-bool values")
+            pass  # Failed expectedly to convert non-bool values
     else:
         if is_bad:
             pytest.fail(f"Passed unexpectedly for non-bool value {x} returning {res}")
diff --git a/tests/test_randomforest.py b/tests/test_randomforest.py
index 6ae341e..6d4dac7 100644
--- a/tests/test_randomforest.py
+++ b/tests/test_randomforest.py
@@ -14,16 +14,12 @@ def test_tree_(self):
         assert not hasattr(self.model, "trees_")
 
     def test_fit(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.fit(None, None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("RandomForestTemplate.fit expectedly refused call")
 
     def test_predict(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.predict(None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("RandomForestTemplate.predict expectedly refused call")
 
 
 class TestRandomForestRegressor:
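For the template tests above, `pytest.raises` is strictly stronger than the removed try/except/`pytest.xfail` construction: it also fails the test when no exception is raised at all, a case the old version silently passed. A minimal sketch of the pattern with an illustrative class (not one of the repository's templates):

```python
import pytest


class Template:
    """Illustrative base class whose subclasses must override fit."""

    def fit(self, X, y):
        raise NotImplementedError


def test_fit_refuses_call():
    # Passes because fit raises NotImplementedError; fails if fit returns normally.
    with pytest.raises(NotImplementedError):
        Template().fit(None, None)
```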
the entropy") + pass # Properly raised error calculating the entropy else: raise ex else: @@ -155,7 +155,7 @@ def test_calc_entropy_rs(y: np.ndarray, target_groups: np.ndarray, h_exp: float) h = scoring.calc_entropy_rs(y, target_groups) except ValueError as ex: if h_exp is None: - pytest.xfail("Properly raised error calculating the entropy") + pass # Properly raised error calculating the entropy else: raise ex else: @@ -178,7 +178,7 @@ def test_gini_impurity(y: np.ndarray): g = scoring.gini_impurity(y) except ValueError as ex: if len(y) == 0: - pytest.xfail("gini_impurity properly failed because of empty y") + pass # gini_impurity properly failed because of empty y else: raise ex else: @@ -202,7 +202,7 @@ def test_gini_impurity_rs(y: np.ndarray): g = rs_gini_impurity(y.tolist()) except ValueError as ex: if len(y) == 0: - pytest.xfail("gini_impurity properly failed because of empty y") + pass # gini_impurity properly failed because of empty y else: raise ex else: @@ -245,7 +245,7 @@ def test_calc_gini_impurity(y: np.ndarray, target_groups: np.ndarray, g_exp: flo g = scoring.calc_gini_impurity(y, target_groups) except ValueError as ex: if g_exp is None: - pytest.xfail("Properly raised error calculating the gini impurity") + pass # Properly raised error calculating the gini impurity else: raise ex else: @@ -273,7 +273,7 @@ def test_calc_gini_impurity_rs(y: np.ndarray, target_groups: np.ndarray, g_exp: g = scoring.calc_gini_impurity_rs(y, target_groups) except ValueError as ex: if g_exp is None: - pytest.xfail("Properly raised error calculating the gini impurity") + pass # Properly raised error calculating the gini impurity else: raise ex else: @@ -299,7 +299,7 @@ def test_xgboost_split_score(g: np.ndarray, h: np.ndarray, is_bad: bool): score = scoring.xgboost_split_score(g, h, growth_params) except ValueError as ex: if is_bad: - pytest.xfail("xgboost_split_score properly failed because of empty g or h") + pass # xgboost_split_score properly failed because of empty g or h else: raise ex else: @@ -355,7 +355,7 @@ def test_calc_xgboost_split_score( score = scoring.calc_xgboost_split_score(target_groups, g, h, growth_params) except ValueError as ex: if score_exp is None: - pytest.xfail("Properly raised error calculating the xgboost score") + pass # Properly raised error calculating the xgboost score else: raise ex else: diff --git a/tests/test_utils.py b/tests/test_utils.py index 5b143a0..a6a301f 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -3,35 +3,36 @@ import pytest from pydantic import ValidationError -import random_tree_models.params import random_tree_models.utils as utils +from random_tree_models.params import ( + ColumnSelectionMethod, + ColumnSelectionParameters, + ThresholdSelectionMethod, + ThresholdSelectionParameters, + TreeGrowthParameters, +) def test_ColumnSelectionMethod(): expected = ["ascending", "largest_delta", "random"] - assert ( - list(random_tree_models.params.ColumnSelectionMethod.__members__.keys()) - == expected - ) + assert list(ColumnSelectionMethod.__members__.keys()) == expected def test_ThresholdSelectionMethod(): expected = ["bruteforce", "quantile", "random", "uniform"] - assert ( - list(random_tree_models.params.ThresholdSelectionMethod.__members__.keys()) - == expected - ) + assert list(ThresholdSelectionMethod.__members__.keys()) == expected # method, quantile, random_state, n_thresholds class TestThresholdSelectionParameters: def test_expected_okay(self): - params = random_tree_models.params.ThresholdSelectionParameters( - method="quantile", 
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 5b143a0..a6a301f 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -3,35 +3,36 @@
 import pytest
 from pydantic import ValidationError
 
-import random_tree_models.params
 import random_tree_models.utils as utils
+from random_tree_models.params import (
+    ColumnSelectionMethod,
+    ColumnSelectionParameters,
+    ThresholdSelectionMethod,
+    ThresholdSelectionParameters,
+    TreeGrowthParameters,
+)
 
 
 def test_ColumnSelectionMethod():
     expected = ["ascending", "largest_delta", "random"]
-    assert (
-        list(random_tree_models.params.ColumnSelectionMethod.__members__.keys())
-        == expected
-    )
+    assert list(ColumnSelectionMethod.__members__.keys()) == expected
 
 
 def test_ThresholdSelectionMethod():
     expected = ["bruteforce", "quantile", "random", "uniform"]
-    assert (
-        list(random_tree_models.params.ThresholdSelectionMethod.__members__.keys())
-        == expected
-    )
+    assert list(ThresholdSelectionMethod.__members__.keys()) == expected
 
 
 # method, quantile, random_state, n_thresholds
 class TestThresholdSelectionParameters:
     def test_expected_okay(self):
-        params = random_tree_models.params.ThresholdSelectionParameters(
-            method="quantile", quantile=0.1, random_state=0, n_thresholds=100
-        )
-        assert (
-            params.method == random_tree_models.params.ThresholdSelectionMethod.quantile
+        params = ThresholdSelectionParameters(
+            method=ThresholdSelectionMethod.quantile,
+            quantile=0.1,
+            random_state=0,
+            n_thresholds=100,
         )
+        assert params.method == ThresholdSelectionMethod.quantile
         assert params.quantile == 0.1
         assert params.random_state == 0
         assert params.n_thresholds == 100
@@ -39,13 +40,16 @@ def test_expected_okay(self):
 
     def test_method_fail(self):
         try:
-            _ = random_tree_models.params.ThresholdSelectionParameters(
-                method="wuppy", quantile=0.1, random_state=0, n_thresholds=100
+            _ = ThresholdSelectionParameters(
+                method="wuppy",  # type: ignore
+                quantile=0.1,
+                random_state=0,
+                n_thresholds=100,
             )
-        except ValueError as ex:
-            pytest.xfail(f"init with unknown method should fail: {ex}")
+        except ValueError:
+            pass  # init with unknown method failed as expected
         else:
-            pytest.fail(f"init with unknown method should fail")
+            pytest.fail("init with unknown method should fail")
 
     @pytest.mark.parametrize(
         "q,fail",
@@ -53,17 +57,20 @@ def test_method_fail(self):
     )
     def test_quantile(self, q: float, fail: bool):
         try:
-            _ = random_tree_models.params.ThresholdSelectionParameters(
-                method="quantile", quantile=q, random_state=0, n_thresholds=100
+            _ = ThresholdSelectionParameters(
+                method=ThresholdSelectionMethod.quantile,
+                quantile=q,
+                random_state=0,
+                n_thresholds=100,
             )
         except ValueError as ex:
             if fail:
-                pytest.xfail(f"init with quantile {q} should fail: {ex}")
+                pass  # init with invalid quantile failed as expected
             else:
                 pytest.fail(f"init with quantile {q} should fail: {ex}")
         else:
             if fail:
-                pytest.fail(f"init with quantile {q} should fail: {ex}")
+                pytest.fail(f"init with quantile {q} should fail")
 
     @pytest.mark.parametrize(
         "random_state,fail",
@@ -75,20 +82,20 @@ def test_quantile(self, q: float, fail: bool):
     )
     def test_random_state(self, random_state: int, fail: bool):
         try:
-            _ = random_tree_models.params.ThresholdSelectionParameters(
-                method="quantile",
+            _ = ThresholdSelectionParameters(
+                method=ThresholdSelectionMethod.quantile,
                 quantile=0.1,
                 random_state=random_state,
                 n_thresholds=100,
             )
         except ValueError as ex:
             if fail:
-                pytest.xfail(f"init with {random_state=} should fail: {ex}")
+                pass  # init with invalid random_state failed as expected
             else:
                 pytest.fail(f"init with {random_state=} should fail: {ex}")
         else:
             if fail:
-                pytest.fail(f"init with {random_state=} should fail: {ex}")
+                pytest.fail(f"init with {random_state=} should fail")
 
     @pytest.mark.parametrize(
         "n_thresholds,fail",
@@ -104,47 +111,45 @@ def test_random_state(self, random_state: int, fail: bool):
     def test_n_thresholds(self, n_thresholds: int, fail: bool):
         try:
-            _ = random_tree_models.params.ThresholdSelectionParameters(
-                method="quantile",
+            _ = ThresholdSelectionParameters(
+                method=ThresholdSelectionMethod.quantile,
                 quantile=0.1,
                 random_state=42,
                 n_thresholds=n_thresholds,
             )
         except ValueError as ex:
             if fail:
-                pytest.xfail(f"init with {n_thresholds=} should fail: {ex}")
+                pass  # init with invalid n_thresholds failed as expected
             else:
                 pytest.fail(f"init with {n_thresholds=} should fail: {ex}")
         else:
             if fail:
-                pytest.fail(f"init with {n_thresholds=} should fail: {ex}")
+                pytest.fail(f"init with {n_thresholds=} should fail")
 
 
 def test_ColumnSelectionParameters():
-    params = random_tree_models.params.ColumnSelectionParameters(
-        method="random", n_trials=10
-    )
-    assert params.method == random_tree_models.params.ColumnSelectionMethod.random
+    params = ColumnSelectionParameters(method=ColumnSelectionMethod.random, n_trials=10)
+    assert params.method == ColumnSelectionMethod.random
     assert params.n_trials == 10
 
 
 class TestTreeGrowthParameters:
     def test_expected_okay(self):
-        params = random_tree_models.params.TreeGrowthParameters(
+        params = TreeGrowthParameters(
             max_depth=10,
             min_improvement=0.0,
             lam=0.0,
             frac_subsamples=1.0,
             frac_features=1.0,
             random_state=0,
-            threshold_params=random_tree_models.params.ThresholdSelectionParameters(
-                method="quantile",
+            threshold_params=ThresholdSelectionParameters(
+                method=ThresholdSelectionMethod.quantile,
                 quantile=0.1,
                 random_state=0,
                 n_thresholds=100,
             ),
-            column_params=random_tree_models.params.ColumnSelectionParameters(
-                method="random", n_trials=10
+            column_params=ColumnSelectionParameters(
+                method=ColumnSelectionMethod.random, n_trials=10
             ),
         )
         assert params.max_depth == 10
@@ -155,11 +160,9 @@ def test_expected_okay(self):
         assert params.random_state == 0
         assert isinstance(
             params.threshold_params,
-            random_tree_models.params.ThresholdSelectionParameters,
-        )
-        assert isinstance(
-            params.column_params, random_tree_models.params.ColumnSelectionParameters
+            ThresholdSelectionParameters,
         )
+        assert isinstance(params.column_params, ColumnSelectionParameters)
 
     @pytest.mark.parametrize(
         "frac_subsamples,fail",
@@ -173,13 +176,13 @@ def test_expected_okay(self):
     )
     def test_frac_subsamples(self, frac_subsamples: float, fail: bool):
         try:
-            _ = random_tree_models.params.TreeGrowthParameters(
+            _ = TreeGrowthParameters(
                 max_depth=10,
                 frac_subsamples=frac_subsamples,
             )
         except ValueError as ex:
             if fail:
-                pytest.xfail(f"init with {frac_subsamples=} should fail: {ex}")
+                pass  # init with invalid frac_subsamples failed as expected
             else:
                 pytest.fail(f"init with {frac_subsamples=} should fail: {ex}")
         else:
@@ -198,22 +201,22 @@ def test_frac_subsamples(self, frac_subsamples: float, fail: bool):
     )
     def test_frac_features(self, frac_features: float, fail: bool):
         try:
-            _ = random_tree_models.params.TreeGrowthParameters(
+            _ = TreeGrowthParameters(
                 max_depth=10,
                 frac_features=frac_features,
             )
         except ValueError as ex:
             if fail:
-                pytest.xfail(f"init with {frac_features=} should fail: {ex}")
+                pass  # init with invalid frac_features failed as expected
             else:
                 pytest.fail(f"init with {frac_features=} should fail: {ex}")
         else:
             if fail:
-                pytest.fail(f"init with {frac_features=} should fail: {ex}")
+                pytest.fail(f"init with {frac_features=} should fail")
 
     def test_fail_if_max_depth_missing(self):
         with pytest.raises(ValidationError):
-            _ = random_tree_models.params.TreeGrowthParameters()  # type: ignore
+            _ = TreeGrowthParameters()  # type: ignore
 
 
 def test_get_logger():
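Since the parameter classes are pydantic models and `ValidationError` subclasses `ValueError`, the remaining try/except/else blocks in these tests could be collapsed further with `pytest.raises`. A sketch for the quantile case, with the out-of-range values assumed for illustration rather than copied from the parametrization:

```python
import pytest
from pydantic import ValidationError

from random_tree_models.params import (
    ThresholdSelectionMethod,
    ThresholdSelectionParameters,
)


@pytest.mark.parametrize("q", [-0.1, 1.1])  # assumed out-of-range quantiles
def test_quantile_rejected(q: float):
    # ValidationError subclasses ValueError, matching the old except clause.
    with pytest.raises(ValidationError):
        ThresholdSelectionParameters(
            method=ThresholdSelectionMethod.quantile,
            quantile=q,
            random_state=0,
            n_thresholds=100,
        )
```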
diff --git a/tests/test_xgboost.py b/tests/test_xgboost.py
index c11f38c..aada2a5 100644
--- a/tests/test_xgboost.py
+++ b/tests/test_xgboost.py
@@ -14,16 +14,12 @@ def test_tree_(self):
         assert not hasattr(self.model, "trees_")
 
     def test_fit(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.fit(None, None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("XGBoostTemplate.fit expectedly refused call")
 
     def test_predict(self):
-        try:
+        with pytest.raises(NotImplementedError):
             self.model.predict(None)  # type: ignore
-        except NotImplementedError as ex:
-            pytest.xfail("XGBoostTemplate.predict expectedly refused call")
 
 
 class TestXGBoostRegressor:
@@ -112,7 +108,7 @@ def test_compute_start_estimate_binomial_loglikelihood(
         start_estimate = xgboost.compute_start_estimate_binomial_loglikelihood(y_float)
     except ValueError as ex:
         if start_estimate_exp is None:
-            pytest.xfail(f"expectedly failed for non -1 and 1 values")
+            pass  # expectedly failed for non -1 and 1 values
         else:
             raise ex
     else:
@@ -172,7 +168,7 @@ def test_compute_derivatives_binomial_loglikelihood(
         g, h = xgboost.compute_derivatives_binomial_loglikelihood(y_float, yhat)
     except ValueError as ex:
         if is_bad:
-            pytest.xfail("Expectedly failed for incorrect y_float values")
+            pass  # Expectedly failed for incorrect y_float values
         else:
             raise ex
     else:
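Background for the start-estimate test above: for labels y in {-1, +1}, Friedman (2001) derives the loss-minimizing constant for the binomial log-likelihood as 0.5 * log((1 + ymean) / (1 - ymean)), which also explains the ValueError for inputs outside {-1, 1}. Whether `compute_start_estimate_binomial_loglikelihood` matches this form exactly is an assumption here; a sketch of the formula:

```python
import numpy as np


def start_estimate(y_float: np.ndarray) -> float:
    """Loss-minimizing constant for binomial log-likelihood, y in {-1, +1}."""
    if not np.isin(y_float, (-1.0, 1.0)).all():
        raise ValueError("y_float must only contain -1 and 1")
    y_mean = y_float.mean()
    return float(0.5 * np.log((1.0 + y_mean) / (1.0 - y_mean)))
```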