diff --git a/autoPyTorch/api/base_task.py b/autoPyTorch/api/base_task.py
index 7ffa77873..7e3ad5554 100644
--- a/autoPyTorch/api/base_task.py
+++ b/autoPyTorch/api/base_task.py
@@ -111,23 +111,6 @@ def send_warnings_to_log(
     return prediction
 
 
-def get_search_updates(categorical_indicator: List[bool]):
-    """
-    These updates mimic the autopytorch tabular paper.
-    Returns:
-    ________
-    search_space_updates - HyperparameterSearchSpaceUpdates
-        The search space updates like setting different hps to different values or ranges.
-    """
-
-    has_cat_features = any(categorical_indicator)
-    has_numerical_features = not all(categorical_indicator)
-
-    search_space_updates = HyperparameterSearchSpaceUpdates()
-
-    return search_space_updates
-
-
 class BaseTask(ABC):
     """
     Base class for the tasks that serve as API to the pipelines.
@@ -200,7 +183,6 @@ def __init__(
         resampling_strategy_args: Optional[Dict[str, Any]] = None,
         search_space_updates: Optional[HyperparameterSearchSpaceUpdates] = None,
         task_type: Optional[str] = None,
-        categorical_indicator: Optional[List[bool]] = None
     ) -> None:
 
         if isinstance(resampling_strategy, NoResamplingStrategyTypes) and ensemble_size != 0:
@@ -267,7 +249,7 @@ def __init__(
 
         self.input_validator: Optional[BaseInputValidator] = None
 
-        self.search_space_updates = search_space_updates if search_space_updates is not None else get_search_updates(categorical_indicator)
+        self.search_space_updates = search_space_updates
         if search_space_updates is not None:
             if not isinstance(self.search_space_updates,
                               HyperparameterSearchSpaceUpdates):
diff --git a/autoPyTorch/api/tabular_classification.py b/autoPyTorch/api/tabular_classification.py
index ec0237046..b39f47834 100644
--- a/autoPyTorch/api/tabular_classification.py
+++ b/autoPyTorch/api/tabular_classification.py
@@ -98,7 +98,6 @@ def __init__(
         resampling_strategy_args: Optional[Dict[str, Any]] = None,
         backend: Optional[Backend] = None,
         search_space_updates: Optional[HyperparameterSearchSpaceUpdates] = None,
-        categorical_indicator: Optional[List[bool]] = None
     ):
         super().__init__(
             seed=seed,
@@ -119,7 +118,6 @@ def __init__(
             resampling_strategy_args=resampling_strategy_args,
             search_space_updates=search_space_updates,
             task_type=TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
-            categorical_indicator=categorical_indicator
         )
 
     def build_pipeline(
diff --git a/autoPyTorch/constants.py b/autoPyTorch/constants.py
index 652a546b9..d2d23d886 100644
--- a/autoPyTorch/constants.py
+++ b/autoPyTorch/constants.py
@@ -54,3 +54,5 @@
 
 CLASSIFICATION_OUTPUTS = [BINARY, MULTICLASS, MULTICLASSMULTIOUTPUT]
 REGRESSION_OUTPUTS = [CONTINUOUS, CONTINUOUSMULTIOUTPUT]
+
+MIN_CATEGORIES_FOR_EMBEDDING_MAX = 7
diff --git a/autoPyTorch/data/base_feature_validator.py b/autoPyTorch/data/base_feature_validator.py
index c2d3b1c91..33b8ddb5c 100644
--- a/autoPyTorch/data/base_feature_validator.py
+++ b/autoPyTorch/data/base_feature_validator.py
@@ -46,10 +46,10 @@ def __init__(
 
         # Required for dataset properties
         self.num_features: Optional[int] = None
-        self.categories: List[List[int]] = []
         self.categorical_columns: List[int] = []
         self.numerical_columns: List[int] = []
 
+        self.num_categories_per_col: Optional[List[int]] = []
         self.all_nan_columns: Optional[Set[Union[int, str]]] = None
 
         self._is_fitted = False
diff --git a/autoPyTorch/data/tabular_feature_validator.py b/autoPyTorch/data/tabular_feature_validator.py
index af7932557..4756f9e97 100644
--- a/autoPyTorch/data/tabular_feature_validator.py
+++ b/autoPyTorch/data/tabular_feature_validator.py
@@ -193,10 +193,8 @@ def _fit(
                 encoded_categories = self.column_transformer.\
                     named_transformers_['categorical_pipeline'].\
                     named_steps['ordinalencoder'].categories_
-                self.categories = [
-                    list(range(len(cat)))
-                    for cat in encoded_categories
-                ]
+
+                self.num_categories_per_col = [len(cat) for cat in encoded_categories]
 
             # differently to categorical_columns and numerical_columns,
             # this saves the index of the column.
@@ -274,8 +272,6 @@ def transform(
             X = self.numpy_to_pandas(X)
 
         if ispandas(X) and not issparse(X):
-            X = cast(pd.DataFrame, X)
-
             if self.all_nan_columns is None:
                 raise ValueError('_fit must be called before calling transform')
 
diff --git a/autoPyTorch/data/tabular_validator.py b/autoPyTorch/data/tabular_validator.py
index 492327fbe..a4b366651 100644
--- a/autoPyTorch/data/tabular_validator.py
+++ b/autoPyTorch/data/tabular_validator.py
@@ -104,6 +104,8 @@ def _compress_dataset(
                 y=y,
                 is_classification=self.is_classification,
                 random_state=self.seed,
+                categorical_columns=self.feature_validator.categorical_columns,
+                n_categories_per_cat_column=self.feature_validator.num_categories_per_col,
                 **self.dataset_compression  # type: ignore [arg-type]
             )
             self._reduced_dtype = dict(X.dtypes) if is_dataframe else X.dtype
diff --git a/autoPyTorch/data/utils.py b/autoPyTorch/data/utils.py
index 20ad5612e..2a44dd5c2 100644
--- a/autoPyTorch/data/utils.py
+++ b/autoPyTorch/data/utils.py
@@ -25,6 +25,7 @@
 from sklearn.utils import _approximate_mode, check_random_state
 from sklearn.utils.validation import _num_samples, check_array
 
+from autoPyTorch.constants import MIN_CATEGORIES_FOR_EMBEDDING_MAX
 from autoPyTorch.data.base_target_validator import SupportedTargetTypes
 from autoPyTorch.utils.common import ispandas
 
@@ -459,8 +460,8 @@ def _subsample_by_indices(
     return X, y
 
 
-def megabytes(arr: DatasetCompressionInputType) -> float:
-
+def get_raw_memory_usage(arr: DatasetCompressionInputType) -> float:
+    memory_in_bytes: float
     if isinstance(arr, np.ndarray):
         memory_in_bytes = arr.nbytes
     elif issparse(arr):
@@ -470,8 +471,43 @@ def megabytes(arr: DatasetCompressionInputType) -> float:
     else:
         raise ValueError(f"Unrecognised data type of X, expected data type to "
                          f"be in (np.ndarray, spmatrix, pd.DataFrame) but got :{type(arr)}")
+    return memory_in_bytes
+
+
+def get_approximate_mem_usage_in_mb(
+    arr: DatasetCompressionInputType,
+    categorical_columns: List,
+    n_categories_per_cat_column: Optional[List[int]] = None
+) -> float:
+
+    err_msg = "Value number of categories per categorical is required when the data has categorical columns"
+    if ispandas(arr):
+        arr_dtypes = arr.dtypes.to_dict()
+        multipliers = [dtype.itemsize for col, dtype in arr_dtypes.items() if col not in categorical_columns]
+        if len(categorical_columns) > 0:
+            if n_categories_per_cat_column is None:
+                raise ValueError(err_msg)
+            for col, num_cat in zip(categorical_columns, n_categories_per_cat_column):
+                if num_cat < MIN_CATEGORIES_FOR_EMBEDDING_MAX:
+                    multipliers.append(num_cat * arr_dtypes[col].itemsize)
+                else:
+                    multipliers.append(arr_dtypes[col].itemsize)
+        size_one_row = sum(multipliers)
+
+    elif isinstance(arr, (np.ndarray, spmatrix)):
+        n_cols = arr.shape[-1] - len(categorical_columns)
+        multiplier = arr.dtype.itemsize
+        if len(categorical_columns) > 0:
+            if n_categories_per_cat_column is None:
+                raise ValueError(err_msg)
+            # multiply num categories with the size of the column to capture memory after one hot encoding
+            n_cols += sum(num_cat if num_cat < MIN_CATEGORIES_FOR_EMBEDDING_MAX else 1 for num_cat in n_categories_per_cat_column)
+        size_one_row = n_cols * multiplier
+    else:
+        raise ValueError(f"Unrecognised data type of X, expected data type to "
+                         f"be in (np.ndarray, spmatrix, pd.DataFrame), but got :{type(arr)}")
 
-    return float(memory_in_bytes / (2**20))
+    return float(arr.shape[0] * size_one_row / (2**20))
 
 
 def reduce_dataset_size_if_too_large(
@@ -479,10 +515,13 @@ def reduce_dataset_size_if_too_large(
     memory_allocation: Union[int, float],
     is_classification: bool,
     random_state: Union[int, np.random.RandomState],
+    categorical_columns: List,
+    n_categories_per_cat_column: Optional[List[int]] = None,
     y: Optional[SupportedTargetTypes] = None,
     methods: List[str] = ['precision', 'subsample'],
 ) -> DatasetCompressionInputType:
-    f""" Reduces the size of the dataset if it's too close to the memory limit.
+    """
+    Reduces the size of the dataset if it's too close to the memory limit.
 
     Follows the order of the operations passed in and retains the type of its
     input.
@@ -513,7 +552,6 @@ def reduce_dataset_size_if_too_large(
                 Reduce the amount of samples of the dataset such that it fits into the allocated
                 memory. Ensures stratification and that unique labels are present
 
-
         memory_allocation (Union[int, float]):
             The amount of memory to allocate to the dataset. It should specify an
             absolute amount.
@@ -524,7 +562,7 @@ def reduce_dataset_size_if_too_large(
     """
 
     for method in methods:
-        if megabytes(X) <= memory_allocation:
+        if get_approximate_mem_usage_in_mb(X, categorical_columns, n_categories_per_cat_column) <= memory_allocation:
             break
 
         if method == 'precision':
@@ -540,7 +578,8 @@ def reduce_dataset_size_if_too_large(
             # into the allocated memory, we subsample it so that it does
 
             n_samples_before = X.shape[0]
-            sample_percentage = memory_allocation / megabytes(X)
+            sample_percentage = memory_allocation / get_approximate_mem_usage_in_mb(
+                X, categorical_columns, n_categories_per_cat_column)
 
             # NOTE: type ignore
             #
diff --git a/autoPyTorch/datasets/tabular_dataset.py b/autoPyTorch/datasets/tabular_dataset.py
index 6cabfe525..04a5df96b 100644
--- a/autoPyTorch/datasets/tabular_dataset.py
+++ b/autoPyTorch/datasets/tabular_dataset.py
@@ -81,7 +81,7 @@ def __init__(self,
         self.categorical_columns = validator.feature_validator.categorical_columns
         self.numerical_columns = validator.feature_validator.numerical_columns
         self.num_features = validator.feature_validator.num_features
-        self.categories = validator.feature_validator.categories
+        self.num_categories_per_col = validator.feature_validator.num_categories_per_col
 
         super().__init__(train_tensors=(X, Y), test_tensors=(X_test, Y_test), shuffle=shuffle,
                          resampling_strategy=resampling_strategy,
diff --git a/autoPyTorch/evaluation/train_evaluator.py b/autoPyTorch/evaluation/train_evaluator.py
index fed527f2e..6b35476d3 100644
--- a/autoPyTorch/evaluation/train_evaluator.py
+++ b/autoPyTorch/evaluation/train_evaluator.py
@@ -1,6 +1,6 @@
 import json
-from multiprocessing.queues import Queue
 import os
+from multiprocessing.queues import Queue
 from typing import Any, Dict, List, Optional, Tuple, Union
 
 from ConfigSpace.configuration_space import Configuration
@@ -22,6 +22,7 @@
     fit_and_suppress_warnings
 )
 from autoPyTorch.evaluation.utils import DisableFileOutputParameters
+from autoPyTorch.pipeline.base_pipeline import BasePipeline
 from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
 from autoPyTorch.pipeline.tabular_classification import TabularClassificationPipeline
 from autoPyTorch.utils.common import dict_repr, subsampler
@@ -195,24 +196,7 @@ def fit_predict_and_loss(self) -> None:
             additional_run_info = pipeline.get_additional_run_info() if hasattr(
                 pipeline, 'get_additional_run_info') else {}
 
-            # add learning curve of configurations to additional_run_info
-            if isinstance(pipeline, TabularClassificationPipeline):
-                if hasattr(pipeline.named_steps['trainer'], 'run_summary'):
-                    run_summary = pipeline.named_steps['trainer'].run_summary
-                    split_types = ['train', 'val', 'test']
-                    run_summary_dict = dict(
-                        run_summary={},
-                        budget=self.budget,
-                        seed=self.seed,
-                        config_id=self.configuration.config_id,
-                        num_run=self.num_run
-                        )
-                    for split_type in split_types:
-                        run_summary_dict['run_summary'][f'{split_type}_loss'] = run_summary.performance_tracker.get(f'{split_type}_loss', None)
-                        run_summary_dict['run_summary'][f'{split_type}_metrics'] = run_summary.performance_tracker.get(f'{split_type}_metrics', None)
-                    self.logger.debug(f"run_summary_dict {json.dumps(run_summary_dict)}")
-                    with open(os.path.join(self.backend.temporary_directory, 'run_summary.txt'), 'a') as file:
-                        file.write(f"{json.dumps(run_summary_dict)}\n")
+            self._write_run_summary(pipeline)
 
             status = StatusType.SUCCESS
 
@@ -370,6 +354,27 @@ def fit_predict_and_loss(self) -> None:
                 status=status,
             )
 
+    def _write_run_summary(self, pipeline: BasePipeline) -> None:
+        # add learning curve of configurations to additional_run_info
+        if isinstance(pipeline, TabularClassificationPipeline):
+            assert isinstance(self.configuration, Configuration)
+            if hasattr(pipeline.named_steps['trainer'], 'run_summary'):
+                run_summary = pipeline.named_steps['trainer'].run_summary
+                split_types = ['train', 'val', 'test']
+                run_summary_dict = dict(
+                    run_summary={},
+                    budget=self.budget,
+                    seed=self.seed,
+                    config_id=self.configuration.config_id,
+                    num_run=self.num_run)
+                for split_type in split_types:
+                    run_summary_dict['run_summary'][f'{split_type}_loss'] = run_summary.performance_tracker.get(
+                        f'{split_type}_loss', None)
+                    run_summary_dict['run_summary'][f'{split_type}_metrics'] = run_summary.performance_tracker.get(
+                        f'{split_type}_metrics', None)
+                with open(os.path.join(self.backend.temporary_directory, 'run_summary.txt'), 'a') as file:
+                    file.write(f"{json.dumps(run_summary_dict)}\n")
+
     def _fit_and_predict(self, pipeline: BaseEstimator, fold: int, train_indices: Union[np.ndarray, List],
                          test_indices: Union[np.ndarray, List],
                          add_pipeline_to_self: bool
diff --git a/autoPyTorch/pipeline/base_pipeline.py b/autoPyTorch/pipeline/base_pipeline.py
index 8bf3447ce..bb5c03bfd 100644
--- a/autoPyTorch/pipeline/base_pipeline.py
+++ b/autoPyTorch/pipeline/base_pipeline.py
@@ -1,4 +1,3 @@
-from copy import copy
 import warnings
 from abc import ABCMeta
 from collections import Counter
@@ -297,11 +296,10 @@ def _get_hyperparameter_search_space(self,
         """
         raise NotImplementedError()
 
-    def _add_forbidden_conditions(self, cs):
+    def _add_forbidden_conditions(self, cs: ConfigurationSpace) -> ConfigurationSpace:
         """
         Add forbidden conditions to ensure valid configurations.
-        Currently, Learned Entity Embedding is only valid when encoder is one hot encoder
-        and CyclicLR is disabled when using stochastic weight averaging and snapshot
+        Currently, CyclicLR is disabled when using stochastic weight averaging and snapshot
         ensembling.
 
         Args:
@@ -310,33 +308,6 @@ def _add_forbidden_conditions(self, cs):
 
         """
 
-        # Learned Entity Embedding is only valid when encoder is one hot encoder
-        if 'network_embedding' in self.named_steps.keys() and 'encoder' in self.named_steps.keys():
-            embeddings = cs.get_hyperparameter('network_embedding:__choice__').choices
-            if 'LearnedEntityEmbedding' in embeddings:
-                encoders = cs.get_hyperparameter('encoder:__choice__').choices
-                possible_default_embeddings = copy(list(embeddings))
-                del possible_default_embeddings[possible_default_embeddings.index('LearnedEntityEmbedding')]
-
-                for encoder in encoders:
-                    if encoder == 'OneHotEncoder':
-                        continue
-                    while True:
-                        try:
-                            cs.add_forbidden_clause(ForbiddenAndConjunction(
-                                ForbiddenEqualsClause(cs.get_hyperparameter(
-                                    'network_embedding:__choice__'), 'LearnedEntityEmbedding'),
-                                ForbiddenEqualsClause(cs.get_hyperparameter('encoder:__choice__'), encoder)
-                            ))
-                            break
-                        except ValueError:
-                            # change the default and try again
-                            try:
-                                default = possible_default_embeddings.pop()
-                            except IndexError:
-                                raise ValueError("Cannot find a legal default configuration")
-                            cs.get_hyperparameter('network_embedding:__choice__').default_value = default
-
         # Disable CyclicLR until todo is completed.
         if 'lr_scheduler' in self.named_steps.keys() and 'trainer' in self.named_steps.keys():
             trainers = cs.get_hyperparameter('trainer:__choice__').choices
@@ -347,7 +318,8 @@ def _add_forbidden_conditions(self, cs):
                 if cyclic_lr_name in available_schedulers:
                     # disable snapshot ensembles and stochastic weight averaging
                     snapshot_ensemble_hyperparameter = cs.get_hyperparameter(f'trainer:{trainer}:use_snapshot_ensemble')
-                    if hasattr(snapshot_ensemble_hyperparameter, 'choices') and True in snapshot_ensemble_hyperparameter.choices:
+                    if hasattr(snapshot_ensemble_hyperparameter, 'choices') and \
+                            True in snapshot_ensemble_hyperparameter.choices:
                         cs.add_forbidden_clause(ForbiddenAndConjunction(
                             ForbiddenEqualsClause(snapshot_ensemble_hyperparameter, True),
                             ForbiddenEqualsClause(cs.get_hyperparameter('lr_scheduler:__choice__'), cyclic_lr_name)
@@ -549,7 +521,6 @@ def _check_search_space_updates(self, include: Optional[Dict[str, Any]],
                                                                       node_hyperparameters,
                                                                       update.hyperparameter))
 
-
     def _get_pipeline_steps(self, dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]]
                             ) -> List[Tuple[str, PipelineStepType]]:
         """
diff --git a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/TabularColumnTransformer.py b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/TabularColumnTransformer.py
index 6b38b4650..48f40e9fe 100644
--- a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/TabularColumnTransformer.py
+++ b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/TabularColumnTransformer.py
@@ -24,6 +24,7 @@ def __init__(self, random_state: Optional[Union[np.random.RandomState, int]] = N
         self.add_fit_requirements([
             FitRequirement('numerical_columns', (List,), user_defined=True, dataset_property=True),
             FitRequirement('categorical_columns', (List,), user_defined=True, dataset_property=True)])
+
 
     def get_column_transformer(self) -> ColumnTransformer:
         """
diff --git a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/coalescer/base_coalescer.py b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/coalescer/base_coalescer.py
index b572f8343..59918f62c 100644
--- a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/coalescer/base_coalescer.py
+++ b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/coalescer/base_coalescer.py
@@ -12,7 +12,6 @@ def __init__(self) -> None:
         self._processing = True
         self.add_fit_requirements([
             FitRequirement('categorical_columns', (List,), user_defined=True, dataset_property=True),
-            FitRequirement('categories', (List,), user_defined=True, dataset_property=True)
         ])
 
     def transform(self, X: Dict[str, Any]) -> Dict[str, Any]:
diff --git a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/column_splitting/ColumnSplitter.py b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/column_splitting/ColumnSplitter.py
new file mode 100644
index 000000000..437198d9e
--- /dev/null
+++ b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/column_splitting/ColumnSplitter.py
@@ -0,0 +1,83 @@
+from typing import Any, Dict, List, Optional, Union
+
+from ConfigSpace.configuration_space import ConfigurationSpace
+from ConfigSpace.hyperparameters import (
+    UniformIntegerHyperparameter,
+)
+
+import numpy as np
+
+from autoPyTorch.constants import MIN_CATEGORIES_FOR_EMBEDDING_MAX
+from autoPyTorch.datasets.base_dataset import BaseDatasetPropertiesType
+from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.base_tabular_preprocessing import \
+    autoPyTorchTabularPreprocessingComponent
+from autoPyTorch.utils.common import HyperparameterSearchSpace, add_hyperparameter
+
+
+class ColumnSplitter(autoPyTorchTabularPreprocessingComponent):
+    """
+    Splits categorical columns into columns that are one-hot encoded and columns that are embedded.
+    """
+    def __init__(
+        self,
+        min_categories_for_embedding: int = 5,
+        random_state: Optional[np.random.RandomState] = None
+    ):
+        self.min_categories_for_embedding = min_categories_for_embedding
+        self.random_state = random_state
+
+        self.special_feature_types: Dict[str, List] = dict(encode_columns=[], embed_columns=[])
+        self.num_categories_per_col: Optional[List] = None
+        super().__init__()
+
+    def fit(self, X: Dict[str, Any], y: Optional[Any] = None) -> 'ColumnSplitter':
+
+        self.check_requirements(X, y)
+
+        if len(X['dataset_properties']['categorical_columns']) > 0:
+            self.num_categories_per_col = []
+            for categories_per_column, column in zip(X['dataset_properties']['num_categories_per_col'],
+                                                     X['dataset_properties']['categorical_columns']):
+                if (
+                    categories_per_column >= self.min_categories_for_embedding
+                ):
+                    self.special_feature_types['embed_columns'].append(column)
+                    # we only care about the categories for columns to be embedded
+                    self.num_categories_per_col.append(categories_per_column)
+                else:
+                    self.special_feature_types['encode_columns'].append(column)
+
+        return self
+
+    def transform(self, X: Dict[str, Any]) -> Dict[str, Any]:
+        if self.num_categories_per_col is not None:
+            # update such that only n categories for embedding columns is passed
+            X['dataset_properties']['num_categories_per_col'] = self.num_categories_per_col
+        X.update(self.special_feature_types)
+        return X
+
+    @staticmethod
+    def get_properties(
+        dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None
+    ) -> Dict[str, Union[str, bool]]:
+
+        return {
+            'shortname': 'ColumnSplitter',
+            'name': 'Column Splitter',
+            'handles_sparse': False,
+        }
+
+    @staticmethod
+    def get_hyperparameter_search_space(
+        dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None,
+        min_categories_for_embedding: HyperparameterSearchSpace = HyperparameterSearchSpace(
+            hyperparameter="min_categories_for_embedding",
+            value_range=(3, MIN_CATEGORIES_FOR_EMBEDDING_MAX),
+            default_value=3,
+            log=True),
+    ) -> ConfigurationSpace:
+        cs = ConfigurationSpace()
+
+        add_hyperparameter(cs, min_categories_for_embedding, UniformIntegerHyperparameter)
+
+        return cs
diff --git a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/column_splitting/__init__.py b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/column_splitting/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/encoding/OneHotEncoder.py b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/encoding/OneHotEncoder.py
index 5c9281891..4f8878615 100644
--- a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/encoding/OneHotEncoder.py
+++ b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/encoding/OneHotEncoder.py
@@ -22,10 +22,10 @@ def fit(self, X: Dict[str, Any], y: Any = None) -> BaseEncoder:
 
         self.preprocessor['categorical'] = OHE(
             # It is safer to have the OHE produce a 0 array than to crash a good configuration
-            categories=X['dataset_properties']['categories']
-            if len(X['dataset_properties']['categories']) > 0 else 'auto',
+            categories='auto',
             sparse=False,
-            handle_unknown='ignore')
+            handle_unknown='ignore',
+            dtype=np.float32)
         return self
 
     @staticmethod
diff --git a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/encoding/base_encoder.py b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/encoding/base_encoder.py
index eadc0a188..0a2486420 100644
--- a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/encoding/base_encoder.py
+++ b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/encoding/base_encoder.py
@@ -13,8 +13,7 @@ class BaseEncoder(autoPyTorchTabularPreprocessingComponent):
     def __init__(self) -> None:
         super().__init__()
         self.add_fit_requirements([
-            FitRequirement('categorical_columns', (List,), user_defined=True, dataset_property=True),
-            FitRequirement('categories', (List,), user_defined=True, dataset_property=True)])
+            FitRequirement('categorical_columns', (List,), user_defined=True, dataset_property=True)])
 
     def transform(self, X: Dict[str, Any]) -> Dict[str, Any]:
         """
diff --git a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/feature_preprocessing/utils.py b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/feature_preprocessing/utils.py
index 5d91ac2b6..1968e9f3e 100644
--- a/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/feature_preprocessing/utils.py
+++ b/autoPyTorch/pipeline/components/preprocessing/tabular_preprocessing/feature_preprocessing/utils.py
@@ -1,6 +1,6 @@
 import warnings
 from math import ceil, floor
-from typing import Dict, List, Optional, Sequence
+from typing import Dict, List, Optional, Sequence, Tuple
 
 from autoPyTorch.datasets.base_dataset import BaseDatasetPropertiesType
 from autoPyTorch.utils.common import HyperparameterSearchSpace, HyperparameterValueType
@@ -81,11 +81,17 @@ def percentage_value_range_to_integer_range(
             log = False
         else:
             log = hyperparameter_search_space.log
+
+        value_range: Tuple
+        if len(hyperparameter_search_space.value_range) == 2:
+            value_range = (floor(float(hyperparameter_search_space.value_range[0]) * n_features),
+                           floor(float(hyperparameter_search_space.value_range[-1]) * n_features))
+        else:
+            value_range = (floor(float(hyperparameter_search_space.value_range[0]) * n_features),)
+
         hyperparameter_search_space = HyperparameterSearchSpace(
             hyperparameter=hyperparameter_name,
-            value_range=(
-                floor(float(hyperparameter_search_space.value_range[0]) * n_features),
-                floor(float(hyperparameter_search_space.value_range[1]) * n_features)),
+            value_range=value_range,
             default_value=ceil(float(hyperparameter_search_space.default_value) * n_features),
             log=log)
     else:
diff --git a/autoPyTorch/pipeline/components/setup/early_preprocessor/EarlyPreprocessing.py b/autoPyTorch/pipeline/components/setup/early_preprocessor/EarlyPreprocessing.py
index 597f14ca6..486ce2ef7 100644
--- a/autoPyTorch/pipeline/components/setup/early_preprocessor/EarlyPreprocessing.py
+++ b/autoPyTorch/pipeline/components/setup/early_preprocessor/EarlyPreprocessing.py
@@ -40,7 +40,10 @@ def transform(self, X: Dict[str, Any]) -> Dict[str, Any]:
         X['X_train'] = preprocess(dataset=X_train, transforms=transforms)
 
         # We need to also save the preprocess transforms for inference
-        X.update({'preprocess_transforms': transforms})
+        X.update({
+                 'preprocess_transforms': transforms,
+                 'shape_after_preprocessing': X['X_train'].shape[1:]
+                 })
         return X
 
     @staticmethod
diff --git a/autoPyTorch/pipeline/components/setup/network_backbone/base_network_backbone.py b/autoPyTorch/pipeline/components/setup/network_backbone/base_network_backbone.py
index ef3cc1768..f63ebd578 100644
--- a/autoPyTorch/pipeline/components/setup/network_backbone/base_network_backbone.py
+++ b/autoPyTorch/pipeline/components/setup/network_backbone/base_network_backbone.py
@@ -30,8 +30,7 @@ def __init__(self,
         self.add_fit_requirements([
             FitRequirement('X_train', (np.ndarray, pd.DataFrame, spmatrix), user_defined=True,
                            dataset_property=False),
-            FitRequirement('input_shape', (Iterable,), user_defined=True, dataset_property=True),
-            FitRequirement('tabular_transformer', (BaseEstimator,), user_defined=False, dataset_property=False),
+            FitRequirement('shape_after_preprocessing', (Iterable,), user_defined=False, dataset_property=False),
             FitRequirement('network_embedding', (nn.Module,), user_defined=False, dataset_property=False)
         ])
         self.backbone: nn.Module = None
@@ -49,9 +48,8 @@ def fit(self, X: Dict[str, Any], y: Any = None) -> BaseEstimator:
             Self
         """
         self.check_requirements(X, y)
-        X_train = X['X_train']
 
-        input_shape = X_train.shape[1:]
+        input_shape = X['shape_after_preprocessing']
 
         input_shape = get_output_shape(X['network_embedding'], input_shape=input_shape)
         self.input_shape = input_shape
diff --git a/autoPyTorch/pipeline/components/setup/network_backbone/utils.py b/autoPyTorch/pipeline/components/setup/network_backbone/utils.py
index 1af7ad7af..c1e7aae7b 100644
--- a/autoPyTorch/pipeline/components/setup/network_backbone/utils.py
+++ b/autoPyTorch/pipeline/components/setup/network_backbone/utils.py
@@ -25,7 +25,9 @@ def get_output_shape(network: torch.nn.Module, input_shape: Tuple[int, ...]
     :param input_shape: shape of the input
     :return: output_shape
     """
-    placeholder = torch.randn((2, *input_shape), dtype=torch.float)
+    # Integer values in {0, 1} (high=2) are safe inputs for the nn.Embedding
+    # layers, since any embedded column has at least 3 categories (the minimum `min_values_for_embedding`)
+    placeholder = torch.randint(high=2, size=(2, *input_shape), dtype=torch.float)
     with torch.no_grad():
         output = network(placeholder)
 
diff --git a/autoPyTorch/pipeline/components/setup/network_embedding/LearnedEntityEmbedding.py b/autoPyTorch/pipeline/components/setup/network_embedding/LearnedEntityEmbedding.py
index 49ecf40b7..746ef7911 100644
--- a/autoPyTorch/pipeline/components/setup/network_embedding/LearnedEntityEmbedding.py
+++ b/autoPyTorch/pipeline/components/setup/network_embedding/LearnedEntityEmbedding.py
@@ -2,8 +2,9 @@
 
 from ConfigSpace.configuration_space import ConfigurationSpace
 from ConfigSpace.hyperparameters import (
+    CategoricalHyperparameter,
     UniformFloatHyperparameter,
-    UniformIntegerHyperparameter
+    UniformIntegerHyperparameter,
 )
 
 import numpy as np
@@ -16,39 +17,58 @@
 from autoPyTorch.utils.common import HyperparameterSearchSpace, add_hyperparameter
 
 
+def get_num_output_dimensions(config: Dict[str, Any], num_categs_per_feature: List[int]) -> List[int]:
+    """
+        Returns list of embedding sizes for each categorical variable.
+        Selects this adaptively based on the training dataset.
+        Note: Assumes there is at least one embed feature.
+
+    Args:
+        config (Dict[str, Any]):
+            contains the hyperparameters required to calculate the `num_output_dimensions`
+        num_categs_per_feature (List[int]):
+            list containing number of categories for each feature that is to be embedded,
+            0 if the column is not an embed column
+
+    Returns:
+        List[int]:
+            list containing the output embedding size for each column,
+            1 if the column is not an embed column
+    """
+
+    max_embedding_dim = config['max_embedding_dim']
+    embed_exponent = config['embed_exponent']
+    size_factor = config['embedding_size_factor']
+    num_output_dimensions = [int(size_factor*max(
+                                                 2,
+                                                 min(max_embedding_dim,
+                                                     1.6 * num_categories**embed_exponent)))
+                             if num_categories > 0 else 1 for num_categories in num_categs_per_feature]
+    return num_output_dimensions
+
+
 class _LearnedEntityEmbedding(nn.Module):
     """ Learned entity embedding module for categorical features"""
 
-    def __init__(self, config: Dict[str, Any], num_input_features: np.ndarray, num_numerical_features: int):
+    def __init__(self, config: Dict[str, Any], num_categories_per_col: np.ndarray, num_features_excl_embed: int):
         """
         Args:
             config (Dict[str, Any]): The configuration sampled by the hyperparameter optimizer
-            num_input_features (np.ndarray): column wise information of number of output columns after transformation
-                for each categorical column and 0 for numerical columns
-            num_numerical_features (int): number of numerical features in X
+            num_categories_per_col (np.ndarray): number of categories per categorical columns that will be embedded
+            num_features_excl_embed (int): number of features in X excluding the features that need to be embedded
         """
         super().__init__()
         self.config = config
-
-        self.num_numerical = num_numerical_features
         # list of number of categories of categorical data
         # or 0 for numerical data
-        self.num_input_features = num_input_features
-        categorical_features = self.num_input_features > 0
-
-        self.num_categorical_features = self.num_input_features[categorical_features]
-
-        self.embed_features = [num_in >= config["min_unique_values_for_embedding"] for num_in in
-                               self.num_input_features]
-        self.num_output_dimensions = [0] * num_numerical_features
-        self.num_output_dimensions.extend([config["dimension_reduction_" + str(i)] * num_in for i, num_in in
-                                           enumerate(self.num_categorical_features)])
-        self.num_output_dimensions = [int(np.clip(num_out, 1, num_in - 1)) for num_out, num_in in
-                                      zip(self.num_output_dimensions, self.num_input_features)]
-        self.num_output_dimensions = [num_out if embed else num_in for num_out, embed, num_in in
-                                      zip(self.num_output_dimensions, self.embed_features,
-                                          self.num_input_features)]
-        self.num_out_feats = self.num_numerical + sum(self.num_output_dimensions)
+        self.num_categories_per_col = num_categories_per_col
+        self.embed_features = self.num_categories_per_col > 0
+
+        self.num_embed_features = self.num_categories_per_col[self.embed_features]
+
+        self.num_output_dimensions = get_num_output_dimensions(config, self.num_categories_per_col)
+
+        self.num_out_feats = num_features_excl_embed + sum(self.num_output_dimensions)
 
         self.ee_layers = self._create_ee_layers()
 
@@ -56,32 +76,28 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # pass the columns of each categorical feature through entity embedding layer
         # before passing it through the model
         concat_seq = []
-        last_concat = 0
-        x_pointer = 0
+
         layer_pointer = 0
-        for num_in, embed in zip(self.num_input_features, self.embed_features):
+        for x_pointer, embed in enumerate(self.embed_features):
+            current_feature_slice = x[:, x_pointer]
             if not embed:
-                x_pointer += 1
+                concat_seq.append(current_feature_slice.view(-1, 1))
                 continue
-            if x_pointer > last_concat:
-                concat_seq.append(x[:, last_concat: x_pointer])
-            categorical_feature_slice = x[:, x_pointer: x_pointer + num_in]
-            concat_seq.append(self.ee_layers[layer_pointer](categorical_feature_slice))
+            current_feature_slice = current_feature_slice.to(torch.int)
+            concat_seq.append(self.ee_layers[layer_pointer](current_feature_slice))
             layer_pointer += 1
-            x_pointer += num_in
-            last_concat = x_pointer
 
-        concat_seq.append(x[:, last_concat:])
         return torch.cat(concat_seq, dim=1)
 
     def _create_ee_layers(self) -> nn.ModuleList:
         # entity embeding layers are Linear Layers
         layers = nn.ModuleList()
-        for i, (num_in, embed, num_out) in enumerate(zip(self.num_input_features, self.embed_features,
-                                                         self.num_output_dimensions)):
+        for num_cat, embed, num_out in zip(self.num_categories_per_col,
+                                           self.embed_features,
+                                           self.num_output_dimensions):
             if not embed:
                 continue
-            layers.append(nn.Linear(num_in, num_out))
+            layers.append(nn.Embedding(num_cat, num_out))
         return layers
 
 
@@ -94,33 +110,32 @@ def __init__(self, random_state: Optional[np.random.RandomState] = None, **kwarg
         super().__init__(random_state=random_state)
         self.config = kwargs
 
-    def build_embedding(self, num_input_features: np.ndarray, num_numerical_features: int) -> nn.Module:
+    def build_embedding(self, num_categories_per_col: np.ndarray, num_features_excl_embed: int) -> nn.Module:
         return _LearnedEntityEmbedding(config=self.config,
-                                       num_input_features=num_input_features,
-                                       num_numerical_features=num_numerical_features)
+                                       num_categories_per_col=num_categories_per_col,
+                                       num_features_excl_embed=num_features_excl_embed)
 
     @staticmethod
     def get_hyperparameter_search_space(
         dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None,
-        min_unique_values_for_embedding: HyperparameterSearchSpace = HyperparameterSearchSpace(
-            hyperparameter="min_unique_values_for_embedding",
-            value_range=(3, 7),
-            default_value=5,
-            log=True),
-        dimension_reduction: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter="dimension_reduction",
-                                                                                   value_range=(0, 1),
-                                                                                   default_value=0.5),
+        embed_exponent: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter="embed_exponent",
+                                                                                   value_range=(0.56,),
+                                                                                   default_value=0.56),
+        max_embedding_dim: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter="max_embedding_dim",
+                                                                                   value_range=(100,),
+                                                                                   default_value=100),
+        embedding_size_factor: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter="embedding_size_factor",
+                                                                                     value_range=(0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5),
+                                                                                     default_value=1,
+                                                                                     ),
     ) -> ConfigurationSpace:
         cs = ConfigurationSpace()
-        add_hyperparameter(cs, min_unique_values_for_embedding, UniformIntegerHyperparameter)
         if dataset_properties is not None:
-            for i in range(len(dataset_properties['categorical_columns'])
-                           if isinstance(dataset_properties['categorical_columns'], List) else 0):
-                ee_dimensions_search_space = HyperparameterSearchSpace(hyperparameter="dimension_reduction_" + str(i),
-                                                                       value_range=dimension_reduction.value_range,
-                                                                       default_value=dimension_reduction.default_value,
-                                                                       log=dimension_reduction.log)
-                add_hyperparameter(cs, ee_dimensions_search_space, UniformFloatHyperparameter)
+            if len(dataset_properties['categorical_columns']) > 0:
+                add_hyperparameter(cs, embed_exponent, UniformFloatHyperparameter)
+                add_hyperparameter(cs, max_embedding_dim, UniformIntegerHyperparameter)
+                add_hyperparameter(cs, embedding_size_factor, CategoricalHyperparameter)
+
         return cs
 
     @staticmethod
diff --git a/autoPyTorch/pipeline/components/setup/network_embedding/NoEmbedding.py b/autoPyTorch/pipeline/components/setup/network_embedding/NoEmbedding.py
index 830bdbb00..73d4708a0 100644
--- a/autoPyTorch/pipeline/components/setup/network_embedding/NoEmbedding.py
+++ b/autoPyTorch/pipeline/components/setup/network_embedding/NoEmbedding.py
@@ -24,7 +24,7 @@ class NoEmbedding(NetworkEmbeddingComponent):
     def __init__(self, random_state: Optional[np.random.RandomState] = None):
         super().__init__(random_state=random_state)
 
-    def build_embedding(self, num_input_features: np.ndarray, num_numerical_features: int) -> nn.Module:
+    def build_embedding(self, num_categories_per_col: np.ndarray, num_features_excl_embed: int) -> nn.Module:
         return _NoEmbedding()
 
     @staticmethod
diff --git a/autoPyTorch/pipeline/components/setup/network_embedding/base_network_embedding.py b/autoPyTorch/pipeline/components/setup/network_embedding/base_network_embedding.py
index 998055d2b..5fa451434 100644
--- a/autoPyTorch/pipeline/components/setup/network_embedding/base_network_embedding.py
+++ b/autoPyTorch/pipeline/components/setup/network_embedding/base_network_embedding.py
@@ -1,5 +1,4 @@
-import copy
-from typing import Any, Dict, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple
 
 import numpy as np
 
@@ -8,27 +7,32 @@
 from torch import nn
 
 from autoPyTorch.pipeline.components.setup.base_setup import autoPyTorchSetupComponent
+from autoPyTorch.utils.common import FitRequirement
 
 
 class NetworkEmbeddingComponent(autoPyTorchSetupComponent):
     def __init__(self, random_state: Optional[np.random.RandomState] = None):
         super().__init__(random_state=random_state)
+        self.add_fit_requirements([
+            FitRequirement('num_categories_per_col', (List,), user_defined=True, dataset_property=True),
+            FitRequirement('shape_after_preprocessing', (Tuple[int],), user_defined=False, dataset_property=False)])
+
         self.embedding: Optional[nn.Module] = None
 
     def fit(self, X: Dict[str, Any], y: Any = None) -> BaseEstimator:
 
-        num_numerical_columns, num_input_features = self._get_required_info_from_data(X)
+        num_features_excl_embed, num_categories_per_col = self._get_required_info_from_data(X)
 
         self.embedding = self.build_embedding(
-            num_input_features=num_input_features,
-            num_numerical_features=num_numerical_columns)
+            num_categories_per_col=num_categories_per_col,
+            num_features_excl_embed=num_features_excl_embed)
         return self
 
     def transform(self, X: Dict[str, Any]) -> Dict[str, Any]:
         X.update({'network_embedding': self.embedding})
         return X
 
-    def build_embedding(self, num_input_features: np.ndarray, num_numerical_features: int) -> nn.Module:
+    def build_embedding(self, num_categories_per_col: np.ndarray, num_features_excl_embed: int) -> nn.Module:
         raise NotImplementedError
 
     def _get_required_info_from_data(self, X: Dict[str, Any]) -> Tuple[int, np.ndarray]:
@@ -48,22 +52,16 @@ def _get_required_info_from_data(self, X: Dict[str, Any]) -> Tuple[int, np.ndarr
                 number of categories for categorical columns and
                 0 for numerical columns
         """
-        # Feature preprocessors can alter numerical columns
-        if len(X['dataset_properties']['numerical_columns']) == 0:
-            num_numerical_columns = 0
-        else:
-            X_train = copy.deepcopy(X['backend'].load_datamanager().train_tensors[0][:2])
+        num_cols = X['shape_after_preprocessing']
+        # only works for 2D(rows, features) tabular data
+        num_features_excl_embed = num_cols[0] - len(X['embed_columns'])
 
-            numerical_column_transformer = X['tabular_transformer'].preprocessor. \
-                named_transformers_['numerical_pipeline']
-            num_numerical_columns = numerical_column_transformer.transform(
-                X_train[:, X['dataset_properties']['numerical_columns']]).shape[1]
+        num_categories_per_col = np.zeros(num_cols, dtype=np.int16)
 
-        num_cols = num_numerical_columns + len(X['dataset_properties']['categorical_columns'])
-        num_input_feats = np.zeros(num_cols, dtype=np.int32)
+        categories_per_embed_col = X['dataset_properties']['num_categories_per_col']
 
-        categories = X['dataset_properties']['categories']
-        for idx, cats in enumerate(categories, start=num_numerical_columns):
-            num_input_feats[idx] = len(cats)
+        # only fill num categories for embedding columns
+        for idx, cats in enumerate(categories_per_embed_col, start=num_features_excl_embed):
+            num_categories_per_col[idx] = cats
 
-        return num_numerical_columns, num_input_feats
+        return num_features_excl_embed, num_categories_per_col
diff --git a/autoPyTorch/pipeline/components/training/trainer/__init__.py b/autoPyTorch/pipeline/components/training/trainer/__init__.py
index b380659da..cc881c76b 100755
--- a/autoPyTorch/pipeline/components/training/trainer/__init__.py
+++ b/autoPyTorch/pipeline/components/training/trainer/__init__.py
@@ -442,7 +442,7 @@ def _fit(self, X: Dict[str, Any], y: Any = None, **kwargs: Any) -> 'TrainerChoic
 
             # change model
             update_model_state_dict_from_swa(X['network'], self.choice.swa_model.state_dict())
-            if self.choice.use_snapshot_ensemble:
+            if self.choice.use_snapshot_ensemble and len(self.choice.model_snapshots) > 0:
                 # we update only the last network which pertains to the stochastic weight averaging model
                 swa_utils.update_bn(X['train_data_loader'], self.choice.model_snapshots[-1].double())
 
@@ -481,7 +481,7 @@ def _get_train_label(self, X: Dict[str, Any]) -> List[int]:
         Verifies and validates the labels from train split.
         """
         # Ensure that the split is not missing any class.
-        labels: List[int] = X['y_train'][X['backend'].load_datamanager().splits[X['split_id']][0]]
+        labels: List[int] = X['y_train'][X['train_indices']]
         if STRING_TO_TASK_TYPES[X['dataset_properties']['task_type']] in CLASSIFICATION_TASKS:
             unique_labels = len(np.unique(labels))
             if unique_labels < X['dataset_properties']['output_shape']:
diff --git a/autoPyTorch/pipeline/tabular_classification.py b/autoPyTorch/pipeline/tabular_classification.py
index 91705e3e4..1b49f0d36 100644
--- a/autoPyTorch/pipeline/tabular_classification.py
+++ b/autoPyTorch/pipeline/tabular_classification.py
@@ -1,4 +1,3 @@
-import copy
 import warnings
 from typing import Any, Dict, List, Optional, Tuple, Union
 
@@ -18,8 +17,8 @@
 from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.TabularColumnTransformer import (
     TabularColumnTransformer
 )
-from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.coalescer import (
-    CoalescerChoice
+from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.column_splitting.ColumnSplitter import (
+    ColumnSplitter
 )
 from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.encoding import (
     EncoderChoice
@@ -29,8 +28,6 @@
 )
 from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.imputation.SimpleImputer import SimpleImputer
 from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.scaling import ScalerChoice
-from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.variance_thresholding. \
-    VarianceThreshold import VarianceThreshold
 from autoPyTorch.pipeline.components.setup.early_preprocessor.EarlyPreprocessing import EarlyPreprocessing
 from autoPyTorch.pipeline.components.setup.lr_scheduler import SchedulerChoice
 from autoPyTorch.pipeline.components.setup.network.base_network import NetworkComponent
@@ -54,20 +51,21 @@ class TabularClassificationPipeline(ClassifierMixin, BasePipeline):
     It implements a pipeline, which includes the following as steps:
 
     1. `imputer`
-    2. `encoder`
-    3. `scaler`
-    4. `feature_preprocessor`
-    5. `tabular_transformer`
-    6. `preprocessing`
-    7. `network_embedding`
-    8. `network_backbone`
-    9. `network_head`
-    10. `network`
-    11. `network_init`
-    12. `optimizer`
-    13. `lr_scheduler`
-    14. `data_loader`
-    15. `trainer`
+    2. `column_splitter`
+    3. `encoder`
+    4. `scaler`
+    5. `feature_preprocessor`
+    6. `tabular_transformer`
+    7. `preprocessing`
+    8. `network_embedding`
+    9. `network_backbone`
+    10. `network_head`
+    11. `network`
+    12. `network_init`
+    13. `optimizer`
+    14. `lr_scheduler`
+    15. `data_loader`
+    16. `trainer`
 
     Contrary to the sklearn API it is not possible to enumerate the
     possible parameters in the __init__ function because we only know the
@@ -133,21 +131,23 @@ def __init__(
         # model, so we comply with https://pytorch.org/docs/stable/notes/randomness.html
         torch.manual_seed(self.random_state.get_state()[1][0])
 
-    def _predict_proba(self, X: np.ndarray) -> np.ndarray:
-        # Pre-process X
-        loader = self.named_steps['data_loader'].get_loader(X=X)
-        pred = self.named_steps['network'].predict(loader)
-        if isinstance(self.dataset_properties['output_shape'], int):
-            # The final layer is always softmax now (`pred` already gives pseudo proba)
-            return pred
-        else:
-            raise ValueError("Expected output_shape to be integer, got {},"
-                             "Tabular Classification only supports 'binary' and 'multiclass' outputs"
-                             "got {}".format(type(self.dataset_properties['output_shape']),
-                                             self.dataset_properties['output_type']))
+    def predict(self, X: np.ndarray, batch_size: Optional[int] = None) -> np.ndarray:
+        """Predict the output using the selected model.
+
+        Args:
+            X (np.ndarray): input data to the array
+            batch_size (Optional[int]): batch_size controls whether the pipeline will be
+                called on small chunks of the data. Useful when calling the
+                predict method on the whole array X results in a MemoryError.
+
+        Returns:
+            np.ndarray: the predicted values given input X
+        """
+        probas = super().predict(X=X, batch_size=batch_size)
+        return np.argmax(probas, axis=1)
 
     def predict_proba(self, X: np.ndarray, batch_size: Optional[int] = None) -> np.ndarray:
-        """predict_proba.
+        """predict probabilities.
 
         Args:
             X (np.ndarray):
@@ -161,30 +161,19 @@ def predict_proba(self, X: np.ndarray, batch_size: Optional[int] = None) -> np.n
                 Probabilities of the target being certain class
         """
         if batch_size is None:
-            y = self._predict_proba(X)
-
+            warnings.warn("Batch size not provided. "
+                          "Will predict on the whole data in a single iteration")
+            batch_size = X.shape[0]
+        loader = self.named_steps['data_loader'].get_loader(X=X, batch_size=batch_size)
+        pred = self.named_steps['network'].predict(loader)
+        if isinstance(self.dataset_properties['output_shape'], int):
+            # The final layer is always softmax now (`pred` already gives pseudo proba)
+            return pred
         else:
-            if not isinstance(batch_size, int):
-                raise ValueError("Argument 'batch_size' must be of type int, "
-                                 "but is '%s'" % type(batch_size))
-            if batch_size <= 0:
-                raise ValueError("Argument 'batch_size' must be positive, "
-                                 "but is %d" % batch_size)
-
-            else:
-                # Probe for the target array dimensions
-                target = self.predict_proba(X[0:2].copy())
-
-                y = np.zeros((X.shape[0], target.shape[1]),
-                             dtype=np.float32)
-
-                for k in range(max(1, int(np.ceil(float(X.shape[0]) / batch_size)))):
-                    batch_from = k * batch_size
-                    batch_to = min([(k + 1) * batch_size, X.shape[0]])
-                    pred_prob = self.predict_proba(X[batch_from:batch_to], batch_size=None)
-                    y[batch_from:batch_to] = pred_prob.astype(np.float32)
-
-        return y
+            raise ValueError("Expected output_shape to be integer, got {},"
+                             "Tabular Classification only supports 'binary' and 'multiclass' outputs"
+                             "got {}".format(type(self.dataset_properties['output_shape']),
+                                             self.dataset_properties['output_type']))
 
     def score(self, X: np.ndarray, y: np.ndarray,
               batch_size: Optional[int] = None,
@@ -208,7 +197,7 @@ def score(self, X: np.ndarray, y: np.ndarray,
         """
         from autoPyTorch.pipeline.components.training.metrics.utils import get_metrics, calculate_score
         metrics = get_metrics(self.dataset_properties, [metric_name])
-        y_pred = self.predict(X, batch_size=batch_size)
+        y_pred = self.predict_proba(X, batch_size=batch_size)
         score = calculate_score(y, y_pred, task_type=STRING_TO_TASK_TYPES[str(self.dataset_properties['task_type'])],
                                 metrics=metrics)[metric_name]
         return score
@@ -289,6 +278,7 @@ def _get_pipeline_steps(
             ("imputer", SimpleImputer(random_state=self.random_state)),
             # ("variance_threshold", VarianceThreshold(random_state=self.random_state)),
             # ("coalescer", CoalescerChoice(default_dataset_properties, random_state=self.random_state)),
+            ("column_splitter", ColumnSplitter(random_state=self.random_state)),
             ("encoder", EncoderChoice(default_dataset_properties, random_state=self.random_state)),
             ("scaler", ScalerChoice(default_dataset_properties, random_state=self.random_state)),
             ("feature_preprocessor", FeatureProprocessorChoice(default_dataset_properties,
diff --git a/autoPyTorch/pipeline/tabular_regression.py b/autoPyTorch/pipeline/tabular_regression.py
index 4737bf57d..46569a08b 100644
--- a/autoPyTorch/pipeline/tabular_regression.py
+++ b/autoPyTorch/pipeline/tabular_regression.py
@@ -1,4 +1,3 @@
-import copy
 import warnings
 from typing import Any, Dict, List, Optional, Tuple, Union
 
@@ -18,8 +17,11 @@
 from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.TabularColumnTransformer import (
     TabularColumnTransformer
 )
-from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.coalescer import (
-    CoalescerChoice
+from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.column_splitting.ColumnSplitter import (
+    ColumnSplitter
+)
+from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.column_splitting.ColumnSplitter import (
+    ColumnSplitter
 )
 from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.encoding import (
     EncoderChoice
@@ -29,8 +31,6 @@
 )
 from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.imputation.SimpleImputer import SimpleImputer
 from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.scaling import ScalerChoice
-from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.variance_thresholding. \
-    VarianceThreshold import VarianceThreshold
 from autoPyTorch.pipeline.components.setup.early_preprocessor.EarlyPreprocessing import EarlyPreprocessing
 from autoPyTorch.pipeline.components.setup.lr_scheduler import SchedulerChoice
 from autoPyTorch.pipeline.components.setup.network.base_network import NetworkComponent
@@ -56,20 +56,21 @@ class TabularRegressionPipeline(RegressorMixin, BasePipeline):
     It implements a pipeline, which includes the following as steps:
 
     1. `imputer`
-    2. `encoder`
-    3. `scaler`
-    4. `feature_preprocessor`
-    5. `tabular_transformer`
-    6. `preprocessing`
-    7. `network_embedding`
-    8. `network_backbone`
-    9. `network_head`
-    10. `network`
-    11. `network_init`
-    12. `optimizer`
-    13. `lr_scheduler`
-    14. `data_loader`
-    15. `trainer`
+    2. `column_splitter`
+    3. `encoder`
+    4. `scaler`
+    5. `feature_preprocessor`
+    6. `tabular_transformer`
+    7. `preprocessing`
+    8. `network_embedding`
+    9. `network_backbone`
+    10. `network_head`
+    11. `network`
+    12. `network_init`
+    13. `optimizer`
+    14. `lr_scheduler`
+    15. `data_loader`
+    16. `trainer`
 
     Contrary to the sklearn API it is not possible to enumerate the
     possible parameters in the __init__ function because we only know the
@@ -235,8 +236,9 @@ def _get_pipeline_steps(
 
         steps.extend([
             ("imputer", SimpleImputer(random_state=self.random_state)),
-            ("variance_threshold", VarianceThreshold(random_state=self.random_state)),
-            ("coalescer", CoalescerChoice(default_dataset_properties, random_state=self.random_state)),
+            # ("variance_threshold", VarianceThreshold(random_state=self.random_state)),
+            # ("coalescer", CoalescerChoice(default_dataset_properties, random_state=self.random_state)),
+            ("column_splitter", ColumnSplitter(random_state=self.random_state)),
             ("encoder", EncoderChoice(default_dataset_properties, random_state=self.random_state)),
             ("scaler", ScalerChoice(default_dataset_properties, random_state=self.random_state)),
             ("feature_preprocessor", FeatureProprocessorChoice(default_dataset_properties,
diff --git a/autoPyTorch/utils/hyperparameter_search_space_update.py b/autoPyTorch/utils/hyperparameter_search_space_update.py
index de5a45e12..9891f5f45 100644
--- a/autoPyTorch/utils/hyperparameter_search_space_update.py
+++ b/autoPyTorch/utils/hyperparameter_search_space_update.py
@@ -122,29 +122,6 @@ def append(self, node_name: str, hyperparameter: str, value_range: Union[List, T
                                                             value_range=value_range,
                                                             default_value=default_value,
                                                             log=log))
-    def remove_update(self, update: HyperparameterSearchSpaceUpdate) -> None:
-        """
-        removes an update
-
-        Args:
-            node_name (str):
-            The name of the node in the pipeline
-        hyperparameter (str):
-            The name of the hyperparameter
-        value_range (Union[List, Tuple]):
-            In case of categorical hyperparameter, defines the new categorical choices.
-            In case of numerical hyperparameter, defines the new range
-            in the form of (LOWER, UPPER)
-        default_value (Union[int, float, str]):
-            New default value for the hyperparameter
-        log (bool) (default=False):
-            In case of numerical hyperparameters, whether to sample on a log scale
-
-        Returns:
-            None
-        """
-
-        self.updates = [keep_update for keep_update in self.updates if (keep_update.hyperparameter != update.hyperparameter and keep_update.node_name != update.node_name)]
 
     def save_as_file(self, path: str) -> None:
         """
diff --git a/test/test_api/test_api.py b/test/test_api/test_api.py
index 010342f59..285f0e4bb 100644
--- a/test/test_api/test_api.py
+++ b/test/test_api/test_api.py
@@ -4,6 +4,7 @@
 import pickle
 import tempfile
 import unittest
+import unittest.mock
 from test.test_api.utils import dummy_do_dummy_prediction, dummy_eval_train_function
 
 import ConfigSpace as CS
@@ -45,7 +46,7 @@
 
 # Test
 # ====
-@unittest.mock.patch('autoPyTorch.evaluation.train_evaluator.eval_train_function',
+@unittest.mock.patch('autoPyTorch.evaluation.tae.eval_train_function',
                      new=dummy_eval_train_function)
 @pytest.mark.parametrize('openml_id', (40981, ))
 @pytest.mark.parametrize('resampling_strategy,resampling_strategy_args',
@@ -222,7 +223,7 @@ def test_tabular_classification(openml_id, resampling_strategy, backend, resampl
 
 
 @pytest.mark.parametrize('openml_name', ("boston", ))
-@unittest.mock.patch('autoPyTorch.evaluation.train_evaluator.eval_train_function',
+@unittest.mock.patch('autoPyTorch.evaluation.tae.eval_train_function',
                      new=dummy_eval_train_function)
 @pytest.mark.parametrize('resampling_strategy,resampling_strategy_args',
                          ((HoldoutValTypes.holdout_validation, None),
@@ -499,9 +500,10 @@ def test_do_dummy_prediction(dask_client, fit_dictionary_tabular):
     del estimator
 
 
-@unittest.mock.patch('autoPyTorch.evaluation.train_evaluator.eval_train_function',
+@unittest.mock.patch('autoPyTorch.evaluation.tae.eval_train_function',
                      new=dummy_eval_train_function)
 @pytest.mark.parametrize('openml_id', (40981, ))
+@pytest.mark.skip(reason="Fix with new portfolio PR")
 def test_portfolio_selection(openml_id, backend, n_samples):
 
     # Get the data and check that contents of data-manager make sense
@@ -541,7 +543,8 @@ def test_portfolio_selection(openml_id, backend, n_samples):
     assert any(successful_config in portfolio_configs for successful_config in successful_configs)
 
 
-@unittest.mock.patch('autoPyTorch.evaluation.train_evaluator.eval_train_function',
+@pytest.mark.skip(reason="Fix with new portfolio PR")
+@unittest.mock.patch('autoPyTorch.evaluation.tae.eval_train_function',
                      new=dummy_eval_train_function)
 @pytest.mark.parametrize('openml_id', (40981, ))
 def test_portfolio_selection_failure(openml_id, backend, n_samples):
@@ -689,7 +692,7 @@ def test_pipeline_fit(openml_id,
     configuration = estimator.get_search_space(dataset).get_default_configuration()
     pipeline, run_info, run_value, dataset = estimator.fit_pipeline(dataset=dataset,
                                                                     configuration=configuration,
-                                                                    run_time_limit_secs=50,
+                                                                    run_time_limit_secs=70,
                                                                     disable_file_output=disable_file_output,
                                                                     budget_type='epochs',
                                                                     budget=budget
diff --git a/test/test_data/test_feature_validator.py b/test/test_data/test_feature_validator.py
index daf6f9f1e..309fb0107 100644
--- a/test/test_data/test_feature_validator.py
+++ b/test/test_data/test_feature_validator.py
@@ -139,9 +139,9 @@ def test_featurevalidator_fitontypeA_transformtypeB(input_data_featuretest):
     if isinstance(input_data_featuretest, pd.DataFrame):
         pytest.skip("Column order change in pandas is not supported")
     elif isinstance(input_data_featuretest, np.ndarray):
-        complementary_type = validator.numpy_array_to_pandas(input_data_featuretest)
+        complementary_type = validator.numpy_to_pandas(input_data_featuretest)
     elif isinstance(input_data_featuretest, list):
-        complementary_type, _ = validator.list_to_dataframe(input_data_featuretest)
+        complementary_type, _ = validator.list_to_pandas(input_data_featuretest)
     elif sparse.issparse(input_data_featuretest):
         complementary_type = sparse.csr_matrix(input_data_featuretest.todense())
     else:
@@ -290,14 +290,20 @@ def test_features_unsupported_calls_are_raised():
     expected
     """
     validator = TabularFeatureValidator()
-    with pytest.raises(ValueError, match=r"AutoPyTorch does not support time"):
+    with pytest.raises(TypeError, match=r"Valid types are .*"):
         validator.fit(
             pd.DataFrame({'datetime': [pd.Timestamp('20180310')]})
         )
+
+    validator = TabularFeatureValidator()
     with pytest.raises(ValueError, match=r"AutoPyTorch only supports.*yet, the provided input"):
         validator.fit({'input1': 1, 'input2': 2})
-    with pytest.raises(ValueError, match=r"has unsupported dtype string"):
+
+    validator = TabularFeatureValidator()
+    with pytest.raises(TypeError, match=r"Valid types are .*"):
         validator.fit(pd.DataFrame([{'A': 1, 'B': 2}], dtype='string'))
+
+    validator = TabularFeatureValidator()
     with pytest.raises(ValueError, match=r"The feature dimensionality of the train and test"):
         validator.fit(X_train=np.array([[1, 2, 3], [4, 5, 6]]),
                       X_test=np.array([[1, 2, 3, 4], [4, 5, 6, 7]]),
@@ -425,7 +431,7 @@ def test_unknown_encode_value():
     assert expected_row == x_t[0].tolist()
 
     # Notice how there is only one column 'c' to encode
-    assert validator.categories == [list(range(2)) for i in range(1)]
+    assert validator.num_categories_per_col == [2]
 
 
 # Actual checks for the features
@@ -480,13 +486,13 @@ def test_feature_validator_new_data_after_fit(
     if train_data_type == 'pandas':
         old_dtypes = copy.deepcopy(validator.dtypes)
         validator.dtypes = ['dummy' for dtype in X_train.dtypes]
-        with pytest.raises(ValueError, match=r"Changing the dtype of the features after fit"):
+        with pytest.raises(ValueError, match=r"The dtype of the features must not be changed after fit.*"):
             transformed_X = validator.transform(X_test)
         validator.dtypes = old_dtypes
         if test_data_type == 'pandas':
             columns = X_test.columns.tolist()
             X_test = X_test[reversed(columns)]
-            with pytest.raises(ValueError, match=r"Changing the column order of the features"):
+            with pytest.raises(ValueError, match=r"The column order of the features must not be changed after fit.*"):
                 transformed_X = validator.transform(X_test)
 
 
diff --git a/test/test_data/test_utils.py b/test/test_data/test_utils.py
index 4269c4e5f..6228740b0 100644
--- a/test/test_data/test_utils.py
+++ b/test/test_data/test_utils.py
@@ -25,7 +25,7 @@
 from autoPyTorch.data.utils import (
     default_dataset_compression_arg,
     get_dataset_compression_mapping,
-    megabytes,
+    get_raw_memory_usage,
     reduce_dataset_size_if_too_large,
     reduce_precision,
     subsample,
@@ -45,13 +45,14 @@ def test_reduce_dataset_if_too_large(openmlid, as_frame, n_samples):
         X.copy(),
         y=y.copy(),
         is_classification=True,
+        categorical_columns=[],
         random_state=1,
-        memory_allocation=0.001)
+        memory_allocation=0.01)
 
     assert X_converted.shape[0] < X.shape[0]
     assert y_converted.shape[0] < y.shape[0]
 
-    assert megabytes(X_converted) < megabytes(X)
+    assert get_raw_memory_usage(X_converted) < get_raw_memory_usage(X)
 
 
 @pytest.mark.parametrize("X", [np.asarray([[1, 1, 1]] * 30)])
@@ -211,8 +212,18 @@ def test_unsupported_errors():
         ['a', 'b', 'c', 'a', 'b', 'c'],
         ['a', 'b', 'd', 'r', 'b', 'c']])
     with pytest.raises(ValueError, match=r'X.dtype = .*'):
-        reduce_dataset_size_if_too_large(X, is_classification=True, random_state=1, memory_allocation=0)
+        reduce_dataset_size_if_too_large(
+            X,
+            is_classification=True,
+            categorical_columns=[],
+            random_state=1,
+            memory_allocation=0)
 
     X = [[1, 2], [2, 3]]
     with pytest.raises(ValueError, match=r'Unrecognised data type of X, expected data type to be in .*'):
-        reduce_dataset_size_if_too_large(X, is_classification=True, random_state=1, memory_allocation=0)
+        reduce_dataset_size_if_too_large(
+            X,
+            is_classification=True,
+            categorical_columns=[],
+            random_state=1,
+            memory_allocation=0)
diff --git a/test/test_data/test_validation.py b/test/test_data/test_validation.py
index 48a3ccfeb..58481d230 100644
--- a/test/test_data/test_validation.py
+++ b/test/test_data/test_validation.py
@@ -8,7 +8,8 @@
 import sklearn.model_selection
 
 from autoPyTorch.data.tabular_validator import TabularInputValidator
-from autoPyTorch.data.utils import megabytes
+from autoPyTorch.data.utils import get_approximate_mem_usage_in_mb
+from autoPyTorch.utils.common import ispandas
 
 
 @pytest.mark.parametrize('openmlid', [2, 40975, 40984])
@@ -148,16 +149,36 @@ def test_featurevalidator_dataset_compression(input_data_featuretest):
     X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
         input_data_featuretest, input_data_targets, test_size=0.1, random_state=1)
     validator = TabularInputValidator(
-        dataset_compression={'memory_allocation': 0.8 * megabytes(X_train), 'methods': ['precision', 'subsample']}
+        dataset_compression={
+            'memory_allocation': 0.8 * get_approximate_mem_usage_in_mb(X_train, [], None),
+            'methods': ['precision', 'subsample']}
     )
     validator.fit(X_train=X_train, y_train=y_train)
     transformed_X_train, _ = validator.transform(X_train.copy(), y_train.copy())
 
+    if ispandas(X_train):
+        # input validator converts transformed_X_train to numpy and the cat columns are chosen as column indices
+        columns = X_train.columns
+        categorical_columns = [columns[col] for col in validator.feature_validator.categorical_columns]
+    else:
+        categorical_columns = validator.feature_validator.categorical_columns
+
     assert validator._reduced_dtype is not None
-    assert megabytes(transformed_X_train) < megabytes(X_train)
+    assert get_approximate_mem_usage_in_mb(
+        transformed_X_train,
+        validator.feature_validator.categorical_columns,
+        validator.feature_validator.num_categories_per_col
+    ) < get_approximate_mem_usage_in_mb(
+        X_train, categorical_columns, validator.feature_validator.num_categories_per_col)
 
     transformed_X_test, _ = validator.transform(X_test.copy(), y_test.copy())
-    assert megabytes(transformed_X_test) < megabytes(X_test)
+    assert get_approximate_mem_usage_in_mb(
+        transformed_X_test,
+        validator.feature_validator.categorical_columns,
+        validator.feature_validator.num_categories_per_col
+    ) < get_approximate_mem_usage_in_mb(
+        X_test, categorical_columns, validator.feature_validator.num_categories_per_col)
+
     if hasattr(transformed_X_train, 'iloc'):
         assert all(transformed_X_train.dtypes == transformed_X_test.dtypes)
         assert all(transformed_X_train.dtypes == validator._precision)
diff --git a/test/test_pipeline/components/preprocessing/test_tabular_column_transformer.py b/test/test_pipeline/components/preprocessing/test_tabular_column_transformer.py
index a81eb34a2..f5f928bd8 100644
--- a/test/test_pipeline/components/preprocessing/test_tabular_column_transformer.py
+++ b/test/test_pipeline/components/preprocessing/test_tabular_column_transformer.py
@@ -13,8 +13,6 @@
 )
 
 
-# TODO: fix in preprocessing PR
-# @pytest.mark.skip("Skipping tests as preprocessing is not finalised")
 @pytest.mark.parametrize("fit_dictionary_tabular", ['classification_numerical_only',
                                                     'classification_categorical_only',
                                                     'classification_numerical_and_categorical'], indirect=True)
diff --git a/test/test_pipeline/components/setup/test_setup_networks.py b/test/test_pipeline/components/setup/test_setup_networks.py
index f5e9b1bb7..8fa77560f 100644
--- a/test/test_pipeline/components/setup/test_setup_networks.py
+++ b/test/test_pipeline/components/setup/test_setup_networks.py
@@ -19,8 +19,7 @@ def head(request):
     return request.param
 
 
-# TODO: add 'LearnedEntityEmbedding' after preprocessing dix
-@pytest.fixture(params=['NoEmbedding'])
+@pytest.fixture(params=['NoEmbedding', 'LearnedEntityEmbedding'])
 def embedding(request):
     return request.param
 
diff --git a/test/test_pipeline/components/setup/test_setup_preprocessing_node.py b/test/test_pipeline/components/setup/test_setup_preprocessing_node.py
index 1ec858864..5d3b49923 100644
--- a/test/test_pipeline/components/setup/test_setup_preprocessing_node.py
+++ b/test/test_pipeline/components/setup/test_setup_preprocessing_node.py
@@ -37,7 +37,7 @@ def test_tabular_preprocess(self):
             'is_small_preprocess': True,
             'input_shape': (15,),
             'output_shape': 2,
-            'categories': [],
+            'num_categories_per_col': [],
             'issparse': False
         }
         X = dict(X_train=np.random.random((10, 15)),
@@ -64,43 +64,6 @@ def test_tabular_preprocess(self):
         # We expect the transformation always for inference
         self.assertIn('preprocess_transforms', X.keys())
 
-    def test_tabular_no_preprocess(self):
-        dataset_properties = {
-            'numerical_columns': list(range(15)),
-            'categorical_columns': [],
-            'task_type': TASK_TYPES_TO_STRING[TABULAR_CLASSIFICATION],
-            'output_type': OUTPUT_TYPES_TO_STRING[MULTICLASS],
-            'is_small_preprocess': False,
-            'input_shape': (15,),
-            'output_shape': 2,
-            'categories': [],
-            'issparse': False
-        }
-        X = dict(X_train=np.random.random((10, 15)),
-                 y_train=np.random.random(10),
-                 train_indices=[0, 1, 2, 3, 4, 5],
-                 val_indices=[6, 7, 8, 9],
-                 dataset_properties=dataset_properties,
-                 # Training configuration
-                 num_run=16,
-                 device='cpu',
-                 budget_type='epochs',
-                 epochs=10,
-                 torch_num_threads=1,
-                 early_stopping=20,
-                 split_id=0,
-                 backend=self.backend,
-                 )
-
-        pipeline = TabularClassificationPipeline(dataset_properties=dataset_properties)
-        # Remove the trainer
-        pipeline.steps.pop()
-        pipeline = pipeline.fit(X)
-        X = pipeline.transform(X)
-        self.assertIn('preprocess_transforms', X.keys())
-        self.assertIsInstance(X['preprocess_transforms'], list)
-        self.assertIsInstance(X['preprocess_transforms'][-1].preprocessor, BaseEstimator)
-
 
 class ImagePreprocessingTest(unittest.TestCase):
     def setUp(self):
diff --git a/test/test_pipeline/components/training/test_training.py b/test/test_pipeline/components/training/test_training.py
index c011cea38..e76b7ac5f 100644
--- a/test/test_pipeline/components/training/test_training.py
+++ b/test/test_pipeline/components/training/test_training.py
@@ -513,7 +513,7 @@ def dummy_performance(*args, **kwargs):
         'step_interval': StepIntervalUnit.batch
     }
     for item in ['backend', 'lr_scheduler', 'network', 'optimizer', 'train_data_loader', 'val_data_loader',
-                 'device', 'y_train', 'network_snapshots']:
+                 'device', 'y_train', 'network_snapshots', 'train_indices']:
         fit_dictionary[item] = unittest.mock.MagicMock()
 
     fit_dictionary['backend'].temporary_directory = tempfile.mkdtemp()
diff --git a/test/test_pipeline/test_tabular_classification.py b/test/test_pipeline/test_tabular_classification.py
index f557bd855..7e6ff2f8e 100644
--- a/test/test_pipeline/test_tabular_classification.py
+++ b/test/test_pipeline/test_tabular_classification.py
@@ -33,7 +33,8 @@
 
 @pytest.fixture
 def exclude():
-    return {'feature_preprocessor': ['SelectRatesClassification', 'SelectPercentileClassification'], 'network_embedding': ['LearnedEntityEmbedding']}
+    return {'feature_preprocessor': ['SelectRatesClassification', 'SelectPercentileClassification'],
+            'network_embedding': ['LearnedEntityEmbedding']}
 
 
 @pytest.mark.parametrize("fit_dictionary_tabular", ['classification_categorical_only',
@@ -117,8 +118,8 @@ def test_pipeline_predict(self, fit_dictionary_tabular, exclude):
             pipeline.fit(fit_dictionary_tabular)
 
         # we expect the output to have the same batch size as the test input,
-        # and number of outputs per batch sample equal to the number of outputs
-        expected_output_shape = (X.shape[0], fit_dictionary_tabular["dataset_properties"]["output_shape"])
+        # and number of outputs per batch sample equal to 1
+        expected_output_shape = (X.shape[0], )
 
         prediction = pipeline.predict(X)
         assert isinstance(prediction, np.ndarray)
@@ -429,9 +430,9 @@ def test_trainer_cocktails(self, fit_dictionary_tabular, mocker, lr_scheduler, t
                len(X['network_snapshots']) == config.get(f'trainer:{trainer}:se_lastk')
 
         mocker.patch("autoPyTorch.pipeline.components.setup.network.base_network.NetworkComponent._predict",
-                     return_value=torch.Tensor([1]))
+                     return_value=torch.Tensor([[1, 0]]))
         # Assert that predict gives no error when swa and se are on
-        assert isinstance(pipeline.predict(fit_dictionary_tabular['X_train']), np.ndarray)
+        assert isinstance(pipeline.predict(X['X_train']), np.ndarray)
         # As SE is True, _predict should be called 3 times
         assert pipeline.named_steps['network']._predict.call_count == 3
 
@@ -590,8 +591,8 @@ def test_train_pipeline_with_runtime(fit_dictionary_tabular_dummy):
     # There is no epoch limitation
     assert not budget_tracker.is_max_epoch_reached(epoch=np.inf)
 
-    # More than 200 epochs would have pass in 5 seconds for this dataset
-    assert len(run_summary.performance_tracker['start_time']) > 100
+    # More than 50 epochs would have passed in 5 seconds for this dataset
+    assert len(run_summary.performance_tracker['start_time']) > 50
 
 
 @pytest.mark.parametrize("fit_dictionary_tabular_dummy", ["classification"], indirect=True)
diff --git a/test/test_pipeline/test_tabular_regression.py b/test/test_pipeline/test_tabular_regression.py
index a2c3b695e..7b3bceca5 100644
--- a/test/test_pipeline/test_tabular_regression.py
+++ b/test/test_pipeline/test_tabular_regression.py
@@ -61,11 +61,10 @@ def test_pipeline_fit(self, fit_dictionary_tabular):
         """This test makes sure that the pipeline is able to fit
         given random combinations of hyperparameters across the pipeline"""
         # TODO: fix issue where adversarial also works for regression
-        # TODO: Fix issue with learned entity embedding after preprocessing PR
+
         pipeline = TabularRegressionPipeline(
             dataset_properties=fit_dictionary_tabular['dataset_properties'],
-            exclude={'trainer': ['AdversarialTrainer'],
-                     'network_embedding': ['LearnedEntityEmbedding']})
+            exclude={'trainer': ['AdversarialTrainer']})
         cs = pipeline.get_hyperparameter_search_space()
 
         config = cs.sample_configuration()
@@ -91,8 +90,7 @@ def test_pipeline_predict(self, fit_dictionary_tabular):
         X = fit_dictionary_tabular['X_train'].copy()
         pipeline = TabularRegressionPipeline(
             dataset_properties=fit_dictionary_tabular['dataset_properties'],
-            exclude={'trainer': ['AdversarialTrainer'],
-                     'network_embedding': ['LearnedEntityEmbedding']})
+            exclude={'trainer': ['AdversarialTrainer']})
 
         cs = pipeline.get_hyperparameter_search_space()
         config = cs.sample_configuration()
@@ -121,8 +119,7 @@ def test_pipeline_transform(self, fit_dictionary_tabular):
 
         pipeline = TabularRegressionPipeline(
             dataset_properties=fit_dictionary_tabular['dataset_properties'],
-            exclude={'trainer': ['AdversarialTrainer'],
-                     'network_embedding': ['LearnedEntityEmbedding']})
+            exclude={'trainer': ['AdversarialTrainer']})
         cs = pipeline.get_hyperparameter_search_space()
         config = cs.sample_configuration()
         pipeline.set_hyperparameters(config)
@@ -139,11 +136,10 @@ def test_pipeline_transform(self, fit_dictionary_tabular):
         assert fit_dictionary_tabular.items() <= transformed_fit_dictionary_tabular.items()
 
         # Then the pipeline should have added the following keys
-        # Removing 'imputer', 'encoder', 'scaler', these will be
-        # TODO: added back after a PR fixing preprocessing
         expected_keys = {'tabular_transformer', 'preprocess_transforms', 'network',
                          'optimizer', 'lr_scheduler', 'train_data_loader',
-                         'val_data_loader', 'run_summary', 'feature_preprocessor'}
+                         'val_data_loader', 'run_summary', 'feature_preprocessor',
+                         'imputer', 'encoder', 'scaler'}
         assert expected_keys.issubset(set(transformed_fit_dictionary_tabular.keys()))
 
         # Then we need to have transformations being created.
@@ -152,13 +148,10 @@ def test_pipeline_transform(self, fit_dictionary_tabular):
         # We expect the transformations to be in the pipeline at anytime for inference
         assert 'preprocess_transforms' in transformed_fit_dictionary_tabular.keys()
 
-    @pytest.mark.parametrize("is_small_preprocess", [True, False])
-    def test_default_configuration(self, fit_dictionary_tabular, is_small_preprocess):
+    def test_default_configuration(self, fit_dictionary_tabular):
         """Makes sure that when no config is set, we can trust the
         default configuration from the space"""
 
-        fit_dictionary_tabular['is_small_preprocess'] = is_small_preprocess
-
         pipeline = TabularRegressionPipeline(
             dataset_properties=fit_dictionary_tabular['dataset_properties'],
             exclude={'trainer': ['AdversarialTrainer']})