Add feature preprocessing.
parent c05b047c2d
commit 2fe1b37f55
@@ -0,0 +1,87 @@
import pandas as pd
import numpy as np
from modelling_utils import get_matching_col_names, get_norm_all_participants_scaler


def preprocess_numerical_features(train_numerical_features, test_numerical_features, scaler, flag):
    # fill missing values with the training-set mean
    if flag == "train":
        numerical_features = train_numerical_features.fillna(train_numerical_features.mean())
    elif flag == "test":
        numerical_features = test_numerical_features.fillna(train_numerical_features.mean())
    else:
        raise ValueError("flag should be 'train' or 'test'")
    # normalize with a scaler fitted on the training features
    if scaler != "notnormalized":
        scaler = get_norm_all_participants_scaler(train_numerical_features, scaler)
        numerical_features = pd.DataFrame(scaler.transform(numerical_features),
                                          index=numerical_features.index,
                                          columns=numerical_features.columns)

    return numerical_features

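# Illustrative example (toy data, not from the pipeline): with
# train = pd.DataFrame({"f1": [1.0, np.nan, 3.0]}) and test = pd.DataFrame({"f1": [np.nan, 5.0]}),
# preprocess_numerical_features(train, test, "notnormalized", "test") fills the test NaN with the
# *train* mean (2.0) and skips scaling because the scaler flag is "notnormalized".
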
def preprocess_categorical_features(categorical_features, mode_categorical_features):
    # fillna with mode
    categorical_features = categorical_features.fillna(mode_categorical_features)
    # one-hot encoding
    categorical_features = categorical_features.apply(lambda col: col.astype("category"))
    if not categorical_features.empty:
        categorical_features = pd.get_dummies(categorical_features)
    return categorical_features

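# Illustrative example (hypothetical column): a "gender" column with values ["F", "M", NaN] and
# mode "F" is imputed to ["F", "M", "F"], then get_dummies expands it into indicator columns
# "gender_F" and "gender_M".
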
def split_numerical_categorical_features(features, categorical_feature_colnames):
    numerical_features = features.drop(categorical_feature_colnames, axis=1)
    categorical_features = features[categorical_feature_colnames].copy()
    return numerical_features, categorical_features


def preprocess_features(train_numerical_features, test_numerical_features, categorical_features, mode_categorical_features, scaler, flag):
    numerical_features = preprocess_numerical_features(train_numerical_features, test_numerical_features, scaler, flag)
    categorical_features = preprocess_categorical_features(categorical_features, mode_categorical_features)
    features = pd.concat([numerical_features, categorical_features], axis=1)
    return features


##############################################################
# Summary of the workflow
# Step 1. Read parameters and data
# Step 2. Nested cross validation
# Step 3. Model evaluation
# Step 4. Save results, parameters, and metrics to CSV files
##############################################################

# For reproducibility
np.random.seed(0)

# Step 1. Read parameters and data
# Read parameters
model = snakemake.params["model"]
scaler = snakemake.params["scaler"]
cv_method = snakemake.params["cv_method"]
categorical_operators = snakemake.params["categorical_operators"]
categorical_colnames_demographic_features = snakemake.params["categorical_demographic_features"]
model_hyperparams = snakemake.params["model_hyperparams"][model]

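# Note: scaler is expected to be "minmaxscaler", "standardscaler", "robustscaler", or
# "notnormalized" (see get_norm_all_participants_scaler in modelling_utils.py).
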
# Read data and split
data = pd.read_csv(snakemake.input["data"])
index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"]
if "pid" in data.columns:
    index_columns.append("pid")
data.set_index(index_columns, inplace=True)

data_x, data_y = data.drop("target", axis=1), data[["target"]]

if "pid" in index_columns:
|
||||||
|
categorical_feature_colnames = categorical_colnames_demographic_features + get_matching_col_names(categorical_operators, data_x)
|
||||||
|
else:
|
||||||
|
categorical_feature_colnames = get_matching_col_names(categorical_operators, data_x)
|
||||||
|
|
||||||
|
# Split train and test, numerical and categorical features
train_x, test_x = data_x, data_x
train_numerical_features, train_categorical_features = split_numerical_categorical_features(train_x, categorical_feature_colnames)
train_y, test_y = data_y, data_y
test_numerical_features, test_categorical_features = split_numerical_categorical_features(test_x, categorical_feature_colnames)

# Preprocess: impute and normalize
mode_categorical_features = train_categorical_features.mode().iloc[0]
train_x = preprocess_features(train_numerical_features, None, train_categorical_features, mode_categorical_features, scaler, "train")
test_x = preprocess_features(train_numerical_features, test_numerical_features, test_categorical_features, mode_categorical_features, scaler, "test")
train_x, test_x = train_x.align(test_x, join="outer", axis=1, fill_value=0)  # in case categorical columns were dropped from one split
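
# Illustrative note: with join="outer" and fill_value=0, a dummy column that appears in only one of
# train_x / test_x is added to the other frame filled with zeros, so both end up with identical columns.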

@@ -0,0 +1,27 @@
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler


def get_matching_col_names(operators, features):
    # collect feature columns whose names contain any of the given operator substrings
    col_names = []
    for col in features.columns:
        if any(operator in col for operator in operators):
            col_names.append(col)
    return col_names

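# Illustrative example (hypothetical column names): for a DataFrame with columns
# ["hr_mean", "hr_max"], get_matching_col_names(["mean"], features) returns ["hr_mean"],
# since matching is done by substring.
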
# normalize based on all participants: return fitted scaler
def get_norm_all_participants_scaler(features, scaler_flag):
    # MinMaxScaler
    if scaler_flag == "minmaxscaler":
        scaler = MinMaxScaler()
    # StandardScaler
    elif scaler_flag == "standardscaler":
        scaler = StandardScaler()
    # RobustScaler
    elif scaler_flag == "robustscaler":
        scaler = RobustScaler()
    else:
        # unknown normalization method
        raise ValueError("The normalization method is not predefined; please check that PARAMS_FOR_ANALYSIS.NORMALIZED in the config.yaml file is correct.")
    scaler.fit(features)
    return scaler
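
# Illustrative usage: get_norm_all_participants_scaler(train_features, "standardscaler") returns a
# StandardScaler already fitted on train_features; callers then apply scaler.transform(...) to both
# train and test features, as done in preprocess_numerical_features in the first file of this commit.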