diff --git a/aeon/pipeline/_make_pipeline.py b/aeon/pipeline/_make_pipeline.py
index 872cd71e85..5ee495df90 100644
--- a/aeon/pipeline/_make_pipeline.py
+++ b/aeon/pipeline/_make_pipeline.py
@@ -50,13 +50,6 @@ def make_pipeline(*steps):
     >>> pipe = make_pipeline(ExponentTransformer(), Catch22Classifier())
     >>> type(pipe).__name__
     'ClassifierPipeline'
-
-    Example 3: transformer pipeline
-    >>> from aeon.pipeline import make_pipeline
-    >>> from aeon.transformations.exponent import ExponentTransformer
-    >>> pipe = make_pipeline(ExponentTransformer(), ExponentTransformer())
-    >>> type(pipe).__name__
-    'TransformerPipeline'
     """
     if len(steps) == 1 and isinstance(steps[0], list):
         steps = steps[0]
diff --git a/aeon/pipeline/tests/test_make_pipeline.py b/aeon/pipeline/tests/test_make_pipeline.py
index c950de495e..1dbc0012b5 100644
--- a/aeon/pipeline/tests/test_make_pipeline.py
+++ b/aeon/pipeline/tests/test_make_pipeline.py
@@ -9,15 +9,11 @@
 from aeon.base import BaseEstimator
 from aeon.classification import DummyClassifier
 from aeon.clustering import TimeSeriesKMeans
-from aeon.forecasting.base import BaseForecaster
-from aeon.forecasting.naive import NaiveForecaster
 from aeon.pipeline import make_pipeline
 from aeon.regression import DummyRegressor
-from aeon.testing.utils.data_gen import make_example_2d_numpy, make_example_3d_numpy
-from aeon.transformations.base import BaseTransformer
+from aeon.testing.utils.data_gen import make_example_3d_numpy
 from aeon.transformations.collection import PaddingTransformer, Tabularizer
 from aeon.transformations.collection.feature_based import SevenNumberSummaryTransformer
-from aeon.transformations.exponent import ExponentTransformer


 @pytest.mark.parametrize(
@@ -47,30 +43,3 @@ def test_make_pipeline(pipeline):

     assert isinstance(est, BaseEstimator)
     assert isinstance(o, np.ndarray)
-
-
-@pytest.mark.parametrize(
-    "pipeline",
-    [
-        [ExponentTransformer(), NaiveForecaster()],
-        [ExponentTransformer(), ExponentTransformer(power=3)],
-    ],
-)
-def test_make_pipeline_legacy(pipeline):
-    """Test that make_pipeline works for some legacy interfaces."""
-    assert isinstance(pipeline[-1], BaseTransformer) or isinstance(
-        pipeline[-1], BaseForecaster
-    )
-
-    X, y = make_example_2d_numpy()
-
-    est = make_pipeline(pipeline)
-    est.fit(X, y)
-
-    if hasattr(est, "predict"):
-        o = est.predict([0, 1, 2, 3])
-    else:
-        o = est.transform(X)
-
-    assert isinstance(est, BaseEstimator)
-    assert isinstance(o, np.ndarray)
diff --git a/aeon/transformations/base.py b/aeon/transformations/base.py
index e1b8f03d6c..98f30fde2f 100644
--- a/aeon/transformations/base.py
+++ b/aeon/transformations/base.py
@@ -54,11 +54,6 @@ class name: BaseTransformer
 from aeon.datatypes._series_as_panel import convert_to_scitype
 from aeon.datatypes._vec_df import _VectorizedDF
 from aeon.utils.index_functions import update_data
-from aeon.utils.sklearn import (
-    is_sklearn_classifier,
-    is_sklearn_regressor,
-    is_sklearn_transformer,
-)
 from aeon.utils.validation import abstract_types, is_univariate_series, validate_input
 from aeon.utils.validation._dependencies import _check_estimator_deps

@@ -139,202 +134,6 @@ def __init__(self, _output_convert="auto"):
         super().__init__()
         _check_estimator_deps(self)

-    def __mul__(self, other):
-        """Magic * method, return (right) concatenated TransformerPipeline.
-
-        Implemented for `other` being a transformer, otherwise returns `NotImplemented`.
-
-        Parameters
-        ----------
-        other: `aeon` transformer, must inherit from BaseTransformer
-            otherwise, `NotImplemented` is returned
-
-        Returns
-        -------
-        TransformerPipeline object, concatenation of `self` (first) with `other` (last).
-            not nested, contains only non-TransformerPipeline `aeon` transformers
-        """
-        from aeon.transformations.compose import TransformerPipeline
-
-        # we wrap self in a pipeline, and concatenate with the other
-        # the TransformerPipeline does the rest, e.g., case distinctions on other
-        if (
-            isinstance(other, BaseTransformer)
-            or is_sklearn_classifier(other)
-            or is_sklearn_regressor(other)
-            or is_sklearn_transformer(other)
-        ):
-            self_as_pipeline = TransformerPipeline(steps=[self])
-            return self_as_pipeline * other
-        else:
-            return NotImplemented
-
-    def __rmul__(self, other):
-        """Magic * method, return (left) concatenated TransformerPipeline.
-
-        Implemented for `other` being a transformer, otherwise returns `NotImplemented`.
-
-        Parameters
-        ----------
-        other: `aeon` transformer, must inherit from BaseTransformer
-            otherwise, `NotImplemented` is returned
-
-        Returns
-        -------
-        TransformerPipeline object, concatenation of `other` (first) with `self` (last).
-            not nested, contains only non-TransformerPipeline `aeon` transformers
-        """
-        from aeon.transformations.compose import TransformerPipeline
-
-        # we wrap self in a pipeline, and concatenate with the other
-        # the TransformerPipeline does the rest, e.g., case distinctions on other
-        if isinstance(other, BaseTransformer) or is_sklearn_transformer(other):
-            self_as_pipeline = TransformerPipeline(steps=[self])
-            return other * self_as_pipeline
-        else:
-            return NotImplemented
-
-    def __or__(self, other):
-        """Magic | method, return MultiplexTranformer.
-
-        Implemented for `other` being either a MultiplexTransformer or a transformer.
-
-        Parameters
-        ----------
-        other: `aeon` transformer or aeon MultiplexTransformer
-
-        Returns
-        -------
-        MultiplexTransformer object
-        """
-        from aeon.transformations.compose import MultiplexTransformer
-
-        if isinstance(other, BaseTransformer):
-            multiplex_self = MultiplexTransformer([self])
-            return multiplex_self | other
-        else:
-            return NotImplemented
-
-    def __add__(self, other):
-        """Magic + method, return (right) concatenated FeatureUnion.
-
-        Implemented for `other` being a transformer, otherwise returns `NotImplemented`.
-
-        Parameters
-        ----------
-        other: `aeon` transformer, must inherit from BaseTransformer
-            otherwise, `NotImplemented` is returned
-
-        Returns
-        -------
-        FeatureUnion object, concatenation of `self` (first) with `other` (last).
-            not nested, contains only non-TransformerPipeline `aeon` transformers
-        """
-        from aeon.transformations.compose import FeatureUnion
-
-        # we wrap self in a pipeline, and concatenate with the other
-        # the FeatureUnion does the rest, e.g., case distinctions on other
-        if isinstance(other, BaseTransformer):
-            self_as_pipeline = FeatureUnion(transformer_list=[self])
-            return self_as_pipeline + other
-        else:
-            return NotImplemented
-
-    def __radd__(self, other):
-        """Magic + method, return (left) concatenated FeatureUnion.
-
-        Implemented for `other` being a transformer, otherwise returns `NotImplemented`.
-
-        Parameters
-        ----------
-        other: `aeon` transformer, must inherit from BaseTransformer
-            otherwise, `NotImplemented` is returned
-
-        Returns
-        -------
-        FeatureUnion object, concatenation of `other` (first) with `self` (last).
-            not nested, contains only non-FeatureUnion `aeon` transformers
-        """
-        from aeon.transformations.compose import FeatureUnion
-
-        # we wrap self in a pipeline, and concatenate with the other
-        # the TransformerPipeline does the rest, e.g., case distinctions on other
-        if isinstance(other, BaseTransformer):
-            self_as_pipeline = FeatureUnion(transformer_list=[self])
-            return other + self_as_pipeline
-        else:
-            return NotImplemented
-
-    def __invert__(self):
-        """Magic unary ~ (inversion) method, return InvertTransform of self.
-
-        Returns
-        -------
-        `InvertTransform` object, containing `self`.
-        """
-        from aeon.transformations.compose import InvertTransform
-
-        return InvertTransform(self)
-
-    def __neg__(self):
-        """Magic unary - (negation) method, return OptionalPassthrough of self.
-
-        Intuition: `OptionalPassthrough` is "not having transformer", as an option.
-
-        Returns
-        -------
-        `OptionalPassthrough` object, containing `self`, with `passthrough=False`.
-        The `passthrough` parameter can be set via `set_params`.
-        """
-        from aeon.transformations.compose import OptionalPassthrough
-
-        return OptionalPassthrough(self, passthrough=False)
-
-    def __getitem__(self, key):
-        """Magic [...] method, return column subsetted transformer.
-
-        First index does intput subsetting, second index does output subsetting.
-
-        Keys must be valid inputs for `columns` in `ColumnSubset`.
-
-        Parameters
-        ----------
-        key: valid input for `columns` in `ColumnSubset`, or pair thereof
-            keys can also be a :-slice, in which case it is considered as not passed
-
-        Returns
-        -------
-        the following TransformerPipeline object:
-            ColumnSubset(columns1) * self * ColumnSubset(columns2)
-            where `columns1` is first or only item in `key`, and `columns2` is the last
-            if only one item is passed in `key`, only `columns1` is applied to input
-        """
-        from aeon.transformations.subset import ColumnSelect
-
-        def is_noneslice(obj):
-            res = isinstance(obj, slice)
-            res = res and obj.start is None and obj.stop is None and obj.step is None
-            return res
-
-        if isinstance(key, tuple):
-            if not len(key) == 2:
-                raise ValueError(
-                    "there should be one or two keys when calling [] or getitem, "
-                    "e.g., mytrafo[key], or mytrafo[key1, key2]"
-                )
-            columns1 = key[0]
-            columns2 = key[1]
-            if is_noneslice(columns1) and is_noneslice(columns2):
-                return self
-            elif is_noneslice(columns2):
-                return ColumnSelect(columns1) * self
-            elif is_noneslice(columns1):
-                return self * ColumnSelect(columns2)
-            else:
-                return ColumnSelect(columns1) * self * ColumnSelect(columns2)
-        else:
-            return ColumnSelect(key) * self
-
     def fit(self, X, y=None):
         """Fit transformer to X, optionally to y.
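With `__mul__` and the other composition dunders removed above, transformer chaining has to be written explicitly. A minimal sketch of the two surviving spellings — `TransformerPipeline(steps=...)` is confirmed by the docstring example kept in compose.py below, while the sklearn route mirrors the updated lag.py docstrings; treat their interchangeability for arbitrary aeon transformers as an assumption to verify:

from sklearn.pipeline import make_pipeline

from aeon.transformations.compose import TransformerPipeline
from aeon.transformations.exponent import ExponentTransformer

t1 = ExponentTransformer(power=2)
t2 = ExponentTransformer(power=0.5)

# formerly: pipe = t1 * t2  (BaseTransformer.__mul__, removed in this diff)
pipe = TransformerPipeline(steps=[t1, t2])

# equivalent sklearn spelling, as used by the updated lag.py docstrings below
pipe2 = make_pipeline(t1, t2)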
diff --git a/aeon/transformations/compose.py b/aeon/transformations/compose.py
index ccf8b0ee1b..46547d18e9 100644
--- a/aeon/transformations/compose.py
+++ b/aeon/transformations/compose.py
@@ -109,32 +109,6 @@ class TransformerPipeline(_HeterogenousMetaEstimator, BaseTransformer):
     Example 1, option A: construct without strings (unique names are generated for
     the two components t1 and t2)
     >>> pipe = TransformerPipeline(steps = [t1, t2])
-
-    Example 1, option B: construct with strings to give custom names to steps
-    >>> pipe = TransformerPipeline(
-    ...     steps = [
-    ...         ("trafo1", t1),
-    ...         ("trafo2", t2),
-    ...     ]
-    ... )
-
-    Example 1, option C: for quick construction, the * dunder method can be used
-    >>> pipe = t1 * t2
-
-    Example 2: sklearn transformers can be used in the pipeline.
-    If applied to Series, sklearn transformers are applied by series instance.
-    If applied to Table, sklearn transformers are applied to the table as a whole.
-    >>> from sklearn.preprocessing import StandardScaler
-    >>> from aeon.transformations.summarize import SummaryTransformer
-
-    This applies the scaler per series, then summarizes:
-    >>> pipe = StandardScaler() * SummaryTransformer()
-
-    This applies the sumamrization, then scales the full summary table:
-    >>> pipe = SummaryTransformer() * StandardScaler()
-
-    This scales the series, then summarizes, then scales the full summary table:
-    >>> pipe = StandardScaler() * SummaryTransformer() * StandardScaler()
     """

     _tags = {
@@ -656,29 +630,6 @@ class FitInTransform(BaseTransformer):
     fitted on the inverse_transform data. This is required to have a non-
     state changing transform() method of FitInTransform.

-    Examples
-    --------
-    >>> from aeon.datasets import load_longley
-    >>> from aeon.forecasting.naive import NaiveForecaster
-    >>> from aeon.forecasting.base import ForecastingHorizon
-    >>> from aeon.forecasting.compose import ForecastingPipeline
-    >>> from aeon.forecasting.model_selection import temporal_train_test_split
-    >>> from aeon.transformations.compose import FitInTransform
-    >>> from aeon.transformations.impute import Imputer
-    >>> y, X = load_longley()
-    >>> y_train, y_test, X_train, X_test = temporal_train_test_split(y, X)
-    >>> fh = ForecastingHorizon(y_test.index, is_relative=False)
-    >>> # we want to fit the Imputer only on the predict (=transform) data.
-    >>> # note that NaiveForecaster cant use X data, this is just a show case.
-    >>> pipe = ForecastingPipeline(
-    ...     steps=[
-    ...         ("imputer", FitInTransform(Imputer(method="mean"))),
-    ...         ("forecaster", NaiveForecaster()),
-    ...     ]
-    ... )
-    >>> pipe.fit(y_train, X_train)
-    ForecastingPipeline(...)
-    >>> y_pred = pipe.predict(fh=fh, X=X_test)
     """

     def __init__(self, transformer, skip_inverse_transform=True):
diff --git a/aeon/transformations/lag.py b/aeon/transformations/lag.py
index 137b238c68..22d7d3c8ec 100644
--- a/aeon/transformations/lag.py
+++ b/aeon/transformations/lag.py
@@ -101,9 +101,10 @@ class Lag(BaseTransformer):
     >>> from aeon.datasets import load_airline
     >>> from aeon.transformations.impute import Imputer
     >>> from aeon.transformations.lag import Lag
+    >>> from sklearn.pipeline import make_pipeline
     >>> X = load_airline()
     >>>
-    >>> t = Lag([2, 4, -1]) * Imputer("nearest")
+    >>> t = make_pipeline(Lag([2, 4, -1]), Imputer("nearest"))
     >>> Xt = t.fit_transform(X)
     """

@@ -380,9 +381,10 @@ class ReducerTransform(BaseTransformer):
     >>> from aeon.datasets import load_airline
     >>> from aeon.transformations.impute import Imputer
     >>> from aeon.transformations.lag import Lag
+    >>> from sklearn.pipeline import make_pipeline
     >>> X = load_airline()
     >>>
-    >>> t = Lag([2, 4, -1]) * Imputer("nearest")
+    >>> t = make_pipeline(Lag([2, 4, -1]), Imputer("nearest"))
     >>> Xt = t.fit_transform(X)
     """
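The deleted test module below exercised the dunder shorthands; the compose classes themselves are untouched by this diff. A hedged sketch of the explicit FeatureUnion spelling — `transformer_list` is the keyword used by the removed `__add__` implementation above, so it should still be accepted:

import pandas as pd

from aeon.transformations.compose import FeatureUnion
from aeon.transformations.exponent import ExponentTransformer

X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# formerly: t = ExponentTransformer(power=2) + ExponentTransformer(power=5)
t = FeatureUnion(
    transformer_list=[ExponentTransformer(power=2), ExponentTransformer(power=5)]
)
# one renamed column block per transformer, per the removed
# test_featureunion_transform_cols below
Xt = t.fit_transform(X)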
diff --git a/aeon/transformations/tests/test_compose.py b/aeon/transformations/tests/test_compose.py
deleted file mode 100644
index 1caa762a37..0000000000
--- a/aeon/transformations/tests/test_compose.py
+++ /dev/null
@@ -1,278 +0,0 @@
-"""Unit tests for transformer composition functionality attached to the base class."""
-
-__maintainer__ = []
-__all__ = []
-
-import pandas as pd
-from sklearn.preprocessing import StandardScaler
-
-from aeon.datasets import load_airline, load_basic_motions
-from aeon.testing.utils.data_gen import get_examples
-from aeon.testing.utils.deep_equals import deep_equals
-from aeon.testing.utils.estimator_checks import _assert_array_almost_equal
-from aeon.transformations.boxcox import LogTransformer
-from aeon.transformations.collection.pad import PaddingTransformer
-from aeon.transformations.compose import (
-    ColumnConcatenator,
-    FeatureUnion,
-    InvertTransform,
-    OptionalPassthrough,
-    TransformerPipeline,
-)
-from aeon.transformations.exponent import ExponentTransformer
-from aeon.transformations.impute import Imputer
-from aeon.transformations.subset import ColumnSelect
-from aeon.transformations.summarize import SummaryTransformer
-from aeon.transformations.theta import ThetaLinesTransformer
-
-
-def test_dunder_mul():
-    """Test the mul dunder method."""
-    X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
-
-    t1 = ExponentTransformer(power=2)
-    t2 = ExponentTransformer(power=5)
-    t3 = ExponentTransformer(power=0.1)
-    t4 = ExponentTransformer(power=1)
-
-    t12 = t1 * t2
-    t123 = t12 * t3
-    t312 = t3 * t12
-    t1234 = t123 * t4
-    t1234_2 = t12 * (t3 * t4)
-
-    assert isinstance(t12, TransformerPipeline)
-    assert isinstance(t123, TransformerPipeline)
-    assert isinstance(t312, TransformerPipeline)
-    assert isinstance(t1234, TransformerPipeline)
-    assert isinstance(t1234_2, TransformerPipeline)
-
-    assert [x.power for x in t12.steps] == [2, 5]
-    assert [x.power for x in t123.steps] == [2, 5, 0.1]
-    assert [x.power for x in t312.steps] == [0.1, 2, 5]
-    assert [x.power for x in t1234.steps] == [2, 5, 0.1, 1]
-    assert [x.power for x in t1234_2.steps] == [2, 5, 0.1, 1]
-
-    _assert_array_almost_equal(X, t123.fit_transform(X))
-    _assert_array_almost_equal(X, t312.fit_transform(X))
-    _assert_array_almost_equal(X, t1234.fit_transform(X))
-    _assert_array_almost_equal(X, t1234_2.fit_transform(X))
-    _assert_array_almost_equal(t12.fit_transform(X), t3.fit(X).inverse_transform(X))
-
-
-def test_dunder_add():
-    """Test the add dunder method."""
-    X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
-
-    t1 = ExponentTransformer(power=2)
-    t2 = ExponentTransformer(power=5)
-    t3 = ExponentTransformer(power=3)
-
-    t12 = t1 + t2
-    t123 = t12 + t3
-    t123r = t1 + (t2 + t3)
-
-    assert isinstance(t12, FeatureUnion)
-    assert isinstance(t123, FeatureUnion)
-    assert isinstance(t123r, FeatureUnion)
-
-    assert [x.power for x in t12.transformer_list] == [2, 5]
-    assert [x.power for x in t123.transformer_list] == [2, 5, 3]
-    assert [x.power for x in t123r.transformer_list] == [2, 5, 3]
-
-    _assert_array_almost_equal(t123r.fit_transform(X), t123.fit_transform(X))
-
-
-def test_mul_sklearn_autoadapt():
-    """Test auto-adapter for sklearn in mul."""
-    X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
-
-    t1 = ExponentTransformer(power=2)
-    t2 = StandardScaler()
-    t3 = ExponentTransformer(power=0.5)
-
-    t123 = t1 * t2 * t3
-    t123r = t1 * (t2 * t3)
-    t123l = (t1 * t2) * t3
-
-    assert isinstance(t123, TransformerPipeline)
-    assert isinstance(t123r, TransformerPipeline)
-    assert isinstance(t123l, TransformerPipeline)
-
-    _assert_array_almost_equal(t123.fit_transform(X), t123l.fit_transform(X))
-    _assert_array_almost_equal(t123r.fit_transform(X), t123l.fit_transform(X))
-
-
-def test_missing_unequal_tag_inference():
-    """Test that TransformerPipeline infers missing/unequal tags correctly."""
-    t1 = ExponentTransformer() * PaddingTransformer() * ExponentTransformer()
-    t2 = ExponentTransformer() * ExponentTransformer()
-    t3 = Imputer() * ExponentTransformer()
-    t4 = ExponentTransformer() * Imputer()
-
-    assert t1.get_tag("capability:unequal_length")
-    assert t1.get_tag("capability:unequal_length:removes")
-    assert not t2.get_tag("capability:unequal_length:removes")
-    assert t3.get_tag("capability:missing_values")
-    assert t3.get_tag("capability:missing_values:removes")
-    assert not t4.get_tag("capability:missing_values")
-    assert not t4.get_tag("capability:missing_values:removes")
-
-
-def test_featureunion_transform_cols():
-    """Test FeatureUnion name and number of columns."""
-    X = pd.DataFrame({"test1": [1, 2], "test2": [3, 4]})
-
-    t1 = ExponentTransformer(power=2)
-    t2 = ExponentTransformer(power=5)
-    t3 = ExponentTransformer(power=3)
-
-    t123 = t1 + t2 + t3
-
-    Xt = t123.fit_transform(X)
-
-    expected_cols = pd.Index(
-        [
-            "ExponentTransformer_1__test1",
-            "ExponentTransformer_1__test2",
-            "ExponentTransformer_2__test1",
-            "ExponentTransformer_2__test2",
-            "ExponentTransformer_3__test1",
-            "ExponentTransformer_3__test2",
-        ]
-    )
-
-    msg = (
-        f"FeatureUnion creates incorrect column names for DataFrame output. "
-        f"Expected: {expected_cols}, found: {Xt.columns}"
-    )
-
-    assert deep_equals(Xt.columns, expected_cols), msg
-
-
-def test_sklearn_after_primitives():
-    """Test that sklearn transformer after primitives is correctly applied."""
-    t = SummaryTransformer() * StandardScaler()
-    assert t.get_tag("output_data_type") == "Primitives"
-
-    X = get_examples("pd-multiindex")[0]
-    X_out = t.fit_transform(X)
-    X_summary = SummaryTransformer().fit_transform(X)
-
-    assert (X_out.index == X_summary.index).all()
-    assert deep_equals(X_out.columns, X_summary.columns)
-    # var_0 is the same for all three instances
-    # so summary statistics are all the same, thus StandardScaler transforms to 0
-    assert X_out.iloc[0, 0] > -0.01
-    assert X_out.iloc[0, 0] < 0.01
-    # var_1 has some variation between three instances
-    # fix this to one value to tie the output to current behaviour
-    assert X_out.iloc[0, 10] > -1.37
-    assert X_out.iloc[0, 10] < -1.36
-
-
-def test_pipeline_column_vectorization():
-    """Test that pipelines vectorize properly over columns."""
-    X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
-
-    t = ColumnSelect([0, 1]) * ThetaLinesTransformer()
-
-    X_theta = t.fit_transform(X)
-
-    assert set(X_theta.columns) == {"a__0", "a__2", "b__0", "b__2"}
-
-
-def test_pipeline_inverse():
-    """Tests that inverse composition works, with inverse skips. Also see #3084."""
-    X = load_airline()
-    t = LogTransformer() * Imputer()
-
-    # LogTransformer has inverse_transform, and does not skip inverse transform
-    # therefore, pipeline should also not skip inverse transform, and have capability
-    assert t.get_tag("capability:inverse_transform")
-    assert not t.get_tag("skip-inverse-transform")
-
-    t.fit(X)
-    Xt = t.transform(X)
-    Xtt = t.inverse_transform(Xt)
-
-    _assert_array_almost_equal(X, Xtt)
-
-
-def test_subset_getitem():
-    """Test subsetting using the [ ] dunder, __getitem__."""
-    X = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
-
-    t = ThetaLinesTransformer()
-
-    t_before = t["a"]
-    t_before_with_colon = t[["a", "b"], :]
-    t_after_with_colon = t[:, ["a__0", "a__2"]]
-    t_both = t[["a", "b"], ["b__0", "b__2", "c__0", "c__2"]]
-    t_none = t[:, :]
-
-    assert isinstance(t_before, TransformerPipeline)
-    assert isinstance(t_after_with_colon, TransformerPipeline)
-    assert isinstance(t_before_with_colon, TransformerPipeline)
-    assert isinstance(t_both, TransformerPipeline)
-    assert isinstance(t_none, ThetaLinesTransformer)
-
-    X_theta = t.fit_transform(X)
-
-    _assert_array_almost_equal(t_before.fit_transform(X), X_theta[["a__0", "a__2"]])
-    _assert_array_almost_equal(
-        t_after_with_colon.fit_transform(X), X_theta[["a__0", "a__2"]]
-    )
-    _assert_array_almost_equal(
-        t_before_with_colon.fit_transform(X), X_theta[["a__0", "a__2", "b__0", "b__2"]]
-    )
-    _assert_array_almost_equal(t_both.fit_transform(X), X_theta[["b__0", "b__2"]])
-    _assert_array_almost_equal(t_none.fit_transform(X), X_theta)
-
-
-def test_dunder_invert():
-    """Test the invert dunder method, for wrapping in OptionalPassthrough."""
-    X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
-
-    t = ExponentTransformer(power=3)
-
-    t_inv = ~t
-
-    assert isinstance(t_inv, InvertTransform)
-    assert isinstance(t_inv.get_params()["transformer"], ExponentTransformer)
-
-    _assert_array_almost_equal(
-        t_inv.fit_transform(X), ExponentTransformer(1 / 3).fit_transform(X)
-    )
-
-
-def test_dunder_neg():
-    """Test the neg dunder method, for wrapping in OptionalPassthrough."""
-    X = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
-
-    t = ExponentTransformer(power=2)
-
-    tp = -t
-
-    assert isinstance(tp, OptionalPassthrough)
-    assert not tp.get_params()["passthrough"]
-    assert isinstance(tp.get_params()["transformer"], ExponentTransformer)
-
-    _assert_array_almost_equal(tp.fit_transform(X), X)
-
-
-def test_column_concatenator():
-    X, y = load_basic_motions(split="train")
-    n_cases, n_channels, n_timepoints = X.shape
-    trans = ColumnConcatenator()
-    Xt = trans.fit_transform(X)
-
-    # check if transformed dataframe is univariate
-    assert Xt.shape[1] == 1
-
-    # check if number of time series observations are correct
-    assert Xt.shape[2] == X.shape[1] * X.shape[2]
-
-    # check specific observations
-    assert X[0][-1][-3] == Xt[0][0][-3]
-    assert X[0][0][3] == Xt[0, 0][3]
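test_subset_getitem above relied on the removed `__getitem__`; column subsetting itself is still available as a transformer. A sketch of the explicit form, assuming `ColumnSelect` accepts a single column label the way the removed dunder passed it:

import pandas as pd

from aeon.transformations.compose import TransformerPipeline
from aeon.transformations.subset import ColumnSelect
from aeon.transformations.theta import ThetaLinesTransformer

X = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})

# formerly: t_before = ThetaLinesTransformer()["a"]  (removed __getitem__)
t_before = TransformerPipeline(steps=[ColumnSelect("a"), ThetaLinesTransformer()])
Xt = t_before.fit_transform(X)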
diff --git a/aeon/transformations/tests/test_featureizer.py b/aeon/transformations/tests/test_featureizer.py
deleted file mode 100644
index eeae99ab5b..0000000000
--- a/aeon/transformations/tests/test_featureizer.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Test YtoX."""
-
-__maintainer__ = []
-
-from numpy.testing import assert_array_equal
-
-from aeon.datasets import load_longley
-from aeon.forecasting.model_selection import temporal_train_test_split
-from aeon.transformations.compose import YtoX
-from aeon.transformations.exponent import ExponentTransformer
-from aeon.transformations.lag import Lag
-
-y, X = load_longley()
-y_train, y_test, X_train, X_test = temporal_train_test_split(y, X)
-
-
-def test_featurized_values():
-    """Test against plain transformation.
-
-    Test to check that the featurized values are same as if transformation
-    is done without YtoX.
-    """
-    lags = len(y_test)
-    featurizer = YtoX() * ExponentTransformer() * Lag(lags)
-    featurizer.fit(X_train, y_train)
-    X_hat = featurizer.transform(X_test, y_test)
-
-    exp_transformer = ExponentTransformer()
-    expected_len = lags + len(y_test)
-    y_hat = exp_transformer.fit_transform(y[-expected_len:])
-    assert_array_equal(X_hat[f"lag_{lags}__TOTEMP"].values, y_hat.values)
diff --git a/aeon/transformations/tests/test_multiplexer.py b/aeon/transformations/tests/test_multiplexer.py
index 4705508ea2..8eeeaecf36 100644
--- a/aeon/transformations/tests/test_multiplexer.py
+++ b/aeon/transformations/tests/test_multiplexer.py
@@ -3,7 +3,6 @@
 __maintainer__ = []

 import numpy as np
-import pytest
 from numpy.testing import assert_array_equal
 from sklearn.base import clone

@@ -105,42 +104,3 @@ def test_multiplex_transformer_in_grid():
         NaiveForecaster(strategy="mean"), transformer_tuples, cv, y
     )
     assert gscv_best_name == best_name
-
-
-def test_multiplex_or_dunder():
-    """Test that the MultiplexTransforemer magic "|" dunder works.
-
-    A MultiplexTransformer can be created by using the "|" dunder method on
-    either transformer or MultiplexTransformer objects. Here we test that it performs
-    as expected on all the use cases, and raises the expected error in some others.
-    """
-    # test a simple | example with two transformers:
-    multiplex_two_transformers = ExponentTransformer(2) | ExponentTransformer(3)
-    assert isinstance(multiplex_two_transformers, MultiplexTransformer)
-    assert len(multiplex_two_transformers.transformers) == 2
-    # now test that | also works on two MultiplexTransformers:
-    multiplex_one = MultiplexTransformer(
-        [("exp_2", ExponentTransformer(2)), ("exp_3", ExponentTransformer(3))]
-    )
-    multiplex_two = MultiplexTransformer(
-        [("exp_4", ExponentTransformer(4)), ("exp_5", ExponentTransformer(5))]
-    )
-
-    multiplex_two_multiplex = multiplex_one | multiplex_two
-    assert isinstance(multiplex_two_multiplex, MultiplexTransformer)
-    assert len(multiplex_two_multiplex.transformers) == 4
-    # last we will check 3 transformers with the same name - should check both that
-    # MultiplexTransformer | transformer works, and that ensure_unique_names works
-    multiplex_same_name_three_test = (
-        ExponentTransformer(2) | ExponentTransformer(3) | ExponentTransformer(4)
-    )
-    assert isinstance(multiplex_same_name_three_test, MultiplexTransformer)
-    assert len(multiplex_same_name_three_test._transformers) == 3
-    transformer_param_names = multiplex_same_name_three_test._get_estimator_names(
-        multiplex_same_name_three_test._transformers
-    )
-    assert len(set(transformer_param_names)) == 3
-
-    # test we get a ValueError if we try to | with anything else:
-    with pytest.raises(TypeError):
-        multiplex_one | "this shouldn't work"
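The removed test covered only the `|` shorthand; constructing MultiplexTransformer with an explicit list of named transformers, as the surviving tests in this module do, is unaffected. A minimal sketch:

from aeon.transformations.compose import MultiplexTransformer
from aeon.transformations.exponent import ExponentTransformer

# formerly: mux = ExponentTransformer(2) | ExponentTransformer(3)  (removed __or__)
mux = MultiplexTransformer(
    [("exp_2", ExponentTransformer(2)), ("exp_3", ExponentTransformer(3))]
)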