Add modeling module

pull/95/head
Meng Li 2020-04-29 18:53:54 -04:00
parent b688a0827f
commit 7cbb227214
5 changed files with 386 additions and 10 deletions

@@ -73,22 +73,35 @@ rule all:
segment = config["WIFI"]["DAY_SEGMENTS"]),
# Models
expand("data/processed/{pid}/data_for_individual_model/{source}_{day_segment}_original.csv",
pid = config["PIDS"],
source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
day_segment = config["PARAMS_FOR_ANALYSIS"]["DAY_SEGMENTS"]),
expand("data/processed/data_for_population_model/{source}_{day_segment}_original.csv",
source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
day_segment = config["PARAMS_FOR_ANALYSIS"]["DAY_SEGMENTS"]),
expand("data/processed/{pid}/data_for_individual_model/{source}_{day_segment}_clean.csv",
pid = config["PIDS"],
source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
day_segment = config["PARAMS_FOR_ANALYSIS"]["DAY_SEGMENTS"]),
expand("data/processed/data_for_population_model/{source}_{day_segment}_clean.csv",
source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
day_segment = config["PARAMS_FOR_ANALYSIS"]["DAY_SEGMENTS"]),
expand("data/processed/data_for_population_model/demographic_features.csv"),
expand("data/processed/data_for_population_model/targets_{summarised}.csv",
summarised = config["PARAMS_FOR_ANALYSIS"]["SUMMARISED"]),
expand("data/processed/output_population_model/{rows_nan_threshold}_{cols_nan_threshold}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{model}/{cv_method}/{source}_{day_segment}_{summarised}_{scaler}/{result_component}.csv",
rows_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["ROWS_NAN_THRESHOLD"],
cols_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_NAN_THRESHOLD"],
days_before_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_BEFORE_THRESHOLD"],
days_after_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_AFTER_THRESHOLD"],
cols_var_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_VAR_THRESHOLD"],
model = config["PARAMS_FOR_ANALYSIS"]["MODEL_NAMES"],
cv_method = config["PARAMS_FOR_ANALYSIS"]["CV_METHODS"],
source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
day_segment = config["PARAMS_FOR_ANALYSIS"]["DAY_SEGMENTS"],
summarised = config["PARAMS_FOR_ANALYSIS"]["SUMMARISED"],
scaler = config["PARAMS_FOR_ANALYSIS"]["SCALER"],
result_component = config["PARAMS_FOR_ANALYSIS"]["RESULT_COMPONENTS"]),
# Visualizations
expand("reports/figures/{pid}/{sensor}_heatmap_rows.html", pid=config["PIDS"], sensor=config["SENSORS"]),
expand("reports/figures/{pid}/compliance_heatmap.html", pid=config["PIDS"]),

@@ -136,6 +136,7 @@ PARAMS_FOR_ANALYSIS:
FITBIT_FEATURES: [fitbit_heartrate, fitbit_step]
PHONE_FITBIT_FEATURES: "" # This array is merged in the input_merge_features_of_single_participant function in models.snakefile
DEMOGRAPHIC_FEATURES: [age, gender, inpatientdays]
CATEGORICAL_DEMOGRAPHIC_FEATURES: ["gender"]
# Whether or not to include only days with enough valid sensed hours
# logic can be found in rule phone_valid_sensed_days of rules/preprocessing.snakefile
@@ -157,7 +158,34 @@ PARAMS_FOR_ANALYSIS:
PARTICIPANT_DAYS_BEFORE_THRESHOLD: 7
PARTICIPANT_DAYS_AFTER_THRESHOLD: 4
# Extract summarised features from daily features with any of the following substrings
NUMERICAL_OPERATORS: ["count", "sum", "length", "avg"]
CATEGORICAL_OPERATORS: ["mostcommon"]
MODEL_NAMES: ["LogReg", "kNN", "SVM", "DT", "RF", "GB", "XGBoost", "LightGBM"]
CV_METHODS: ["LeaveOneOut"]
SUMMARISED: ["summarised"] # "summarised" or "notsummarised"
SCALER: ["notnormalized", "minmaxscaler", "standardscaler", "robustscaler"]
RESULT_COMPONENTS: ["fold_predictions", "fold_metrics", "overall_results", "fold_feature_importances"]
MODEL_HYPERPARAMS:
LogReg:
{"clf__C": [0.01, 0.1, 1, 10, 100], "clf__solver": ["newton-cg", "lbfgs", "liblinear", "saga"], "clf__penalty": ["l2"]}
kNN:
{"clf__n_neighbors": range(1, 21, 2), "clf__weights": ["uniform", "distance"], "clf__metric": ["euclidean", "manhattan", "minkowski"]}
SVM:
{"clf__C": [0.01, 0.1, 1, 10, 100], "clf__gamma": ["scale", "auto"], "clf__kernel": ["rbf", "poly", "sigmoid"]}
DT:
{"clf__criterion": ["gini", "entropy"], "clf__max_depth": [None, 3, 5, 7, 9], "clf__max_features": [None, "auto", "sqrt", "log2"]}
RF:
{"clf__n_estimators": [2, 5, 10, 100],"clf__max_depth": [None, 3, 5, 7, 9]}
GB:
{"clf__learning_rate": [0.01, 0.1, 1], "clf__n_estimators": [5, 10, 100, 200], "clf__subsample": [0.5, 0.7, 1.0], "clf__max_depth": [3, 5, 7, 9]}
XGBoost:
{"clf__learning_rate": [0.01, 0.1, 1], "clf__n_estimators": [5, 10, 100, 200], "clf__num_leaves": [5, 16, 31, 62]}
LightGBM:
{"clf__learning_rate": [0.01, 0.1, 1], "clf__n_estimators": [5, 10, 100, 200], "clf__num_leaves": [5, 16, 31, 62]}
# Target Settings:
# 1 => at least ceiling(TARGETS_RATIO_THRESHOLD * number of available CESD scores) of those scores were TARGETS_VALUE_THRESHOLD or higher; 0 => otherwise
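A minimal sketch (not part of this commit) of how the target rule above could be applied, assuming TARGETS_RATIO_THRESHOLD is a fraction between 0 and 1 and TARGETS_VALUE_THRESHOLD is a CESD score cutoff; the helper name and example values are hypothetical.

import math

def binarise_target(cesd_scores, ratio_threshold, value_threshold):
    # Hypothetical helper: return 1 if at least ceil(ratio_threshold * number of available scores)
    # of the CESD scores are >= value_threshold, and 0 otherwise.
    num_high = sum(score >= value_threshold for score in cesd_scores)
    return 1 if num_high >= math.ceil(ratio_threshold * len(cesd_scores)) else 0

# Example: binarise_target([10, 18, 20], ratio_threshold=0.5, value_threshold=16) -> 1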

@@ -85,3 +85,28 @@ rule clean_features_for_population_model:
script:
"../src/models/clean_features_for_model.R"
rule modeling:
input:
cleaned_features = "data/processed/data_for_population_model/{source}_{day_segment}_clean.csv",
demographic_features = "data/processed/data_for_population_model/demographic_features.csv",
targets = "data/processed/data_for_population_model/targets_{summarised}.csv",
params:
model = "{model}",
cv_method = "{cv_method}",
source = "{source}",
day_segment = "{day_segment}",
summarised = "{summarised}",
scaler = "{scaler}",
cols_var_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_VAR_THRESHOLD"],
numerical_operators = config["PARAMS_FOR_ANALYSIS"]["NUMERICAL_OPERATORS"],
categorical_operators = config["PARAMS_FOR_ANALYSIS"]["CATEGORICAL_OPERATORS"],
categorical_demographic_features = config["PARAMS_FOR_ANALYSIS"]["CATEGORICAL_DEMOGRAPHIC_FEATURES"],
model_hyperparams = config["PARAMS_FOR_ANALYSIS"]["MODEL_HYPERPARAMS"],
rowsnan_colsnan_days_colsvar_threshold = "{rows_nan_threshold}_{cols_nan_threshold}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}"
output:
fold_predictions = "data/processed/output_population_model/{rows_nan_threshold}_{cols_nan_threshold}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{model}/{cv_method}/{source}_{day_segment}_{summarised}_{scaler}/fold_predictions.csv",
fold_metrics = "data/processed/output_population_model/{rows_nan_threshold}_{cols_nan_threshold}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{model}/{cv_method}/{source}_{day_segment}_{summarised}_{scaler}/fold_metrics.csv",
overall_results = "data/processed/output_population_model/{rows_nan_threshold}_{cols_nan_threshold}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{model}/{cv_method}/{source}_{day_segment}_{summarised}_{scaler}/overall_results.csv",
fold_feature_importances = "data/processed/output_population_model/{rows_nan_threshold}_{cols_nan_threshold}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{model}/{cv_method}/{source}_{day_segment}_{summarised}_{scaler}/fold_feature_importances.csv"
script:
"../src/models/modeling.py"

@@ -0,0 +1,172 @@
import pandas as pd
from modeling_utils import dropZeroVarianceCols, getNormAllParticipantsScaler, getMetrics, getFeatureImportances, createPipeline
from sklearn.model_selection import train_test_split, LeaveOneOut, GridSearchCV, cross_val_score, KFold
def getMatchingColNames(operators, features):
col_names = []
for col in features.columns:
if any(operator in col for operator in operators):
col_names.append(col)
return col_names
def summarisedNumericalFeatures(col_names, features):
numerical_features = features.groupby(["pid"])[col_names].var()
numerical_features.columns = numerical_features.columns.str.replace("daily", "overallvar")
return numerical_features
def summarisedCategoricalFeatures(col_names, features):
categorical_features = features.groupby(["pid"])[col_names].agg(lambda x: int(pd.Series.mode(x)[0]))
categorical_features.columns = categorical_features.columns.str.replace("daily", "overallmode")
return categorical_features
def summariseFeatures(features, numerical_operators, categorical_operators, cols_var_threshold):
numerical_col_names = getMatchingColNames(numerical_operators, features)
categorical_col_names = getMatchingColNames(categorical_operators, features)
numerical_features = summarisedNumericalFeatures(numerical_col_names, features)
categorical_features = summarisedCategoricalFeatures(categorical_col_names, features)
features = pd.concat([numerical_features, categorical_features], axis=1)
if cols_var_threshold: # drop zero-variance columns again; the newly summarised categorical features have not been variance-checked yet
features = dropZeroVarianceCols(features)
return features
def preprocessNumericalFeatures(train_numerical_features, test_numerical_features, scaler, flag):
# impute missing values with the training-set mean (for both train and test)
if flag == "train":
numerical_features = train_numerical_features.fillna(train_numerical_features.mean())
elif flag == "test":
numerical_features = test_numerical_features.fillna(train_numerical_features.mean())
else:
raise ValueError("flag should be 'train' or 'test'")
# normalize
if scaler != "notnormalized":
scaler = getNormAllParticipantsScaler(train_numerical_features, scaler)
numerical_features = pd.DataFrame(scaler.transform(numerical_features), index=numerical_features.index, columns=numerical_features.columns)
return numerical_features
def preprocessCategoricalFeatures(categorical_features, mode_categorical_features):
# fillna with mode
categorical_features = categorical_features.fillna(mode_categorical_features)
# one-hot encoding
categorical_features = categorical_features.apply(lambda col: col.astype("category"))
categorical_features = pd.get_dummies(categorical_features)
return categorical_features
def splitNumericalCategoricalFeatures(features, categorical_feature_colnames):
numerical_features = features.drop(categorical_feature_colnames, axis=1)
categorical_features = features[categorical_feature_colnames].copy()
return numerical_features, categorical_features
def preprocesFeatures(train_numerical_features, test_numerical_features, categorical_features, mode_categorical_features, scaler, flag):
numerical_features = preprocessNumericalFeatures(train_numerical_features, test_numerical_features, scaler, flag)
categorical_features = preprocessCategoricalFeatures(categorical_features, mode_categorical_features)
features = pd.concat([numerical_features, categorical_features], axis=1)
return features
##############################################################
# Summary of the workflow
# Step 1. Read parameters, features and targets
# Step 2. Extract summarised features based on daily features
# Step 3. Create pipeline
# Step 4. Nested cross validation
# Step 5. Model evaluation
# Step 6. Save results, parameters, and metrics to CSV files
##############################################################
# Step 1. Read parameters, features and targets
# Read parameters
model = snakemake.params["model"]
source = snakemake.params["source"]
summarised = snakemake.params["summarised"]
day_segment = snakemake.params["day_segment"]
scaler = snakemake.params["scaler"]
cv_method = snakemake.params["cv_method"]
cols_var_threshold = snakemake.params["cols_var_threshold"]
numerical_operators = snakemake.params["numerical_operators"]
categorical_operators = snakemake.params["categorical_operators"]
categorical_colnames_demographic_features = snakemake.params["categorical_demographic_features"]
model_hyperparams = snakemake.params["model_hyperparams"][model]
rowsnan_colsnan_days_colsvar_threshold = snakemake.params["rowsnan_colsnan_days_colsvar_threshold"] # thresholds for data cleaning
# Read features and targets
demographic_features = pd.read_csv(snakemake.input["demographic_features"], index_col=["pid"])
targets = pd.read_csv(snakemake.input["targets"], index_col=["pid"])
features = pd.read_csv(snakemake.input["cleaned_features"], parse_dates=["local_date"])
# Step 2. Extract summarised features based on daily features:
#   for numerical features: calculate the variance across all days
#   for categorical features: calculate the mode across all days
if summarised == "summarised":
features = summariseFeatures(features, numerical_operators, categorical_operators, cols_var_threshold)
categorical_feature_colnames = categorical_colnames_demographic_features + getMatchingColNames(categorical_operators, features)
data = pd.concat([features, demographic_features, targets], axis=1, join="inner")
data_x, data_y = data.drop("target", axis=1), data[["target"]]
# Step 3. Create pipeline
pipeline = createPipeline(model)
cv_class = globals()[cv_method] # look up the cross-validation class (e.g. LeaveOneOut) by its name from the config
inner_cv = cv_class()
outer_cv = cv_class()
# Step 4. Nested cross validation
fold_id, pid, best_params, true_y, pred_y, pred_y_prob = [], [], [], [], [], []
feature_importances_all_folds = pd.DataFrame()
fold_count = 1
# Outer cross validation
for train_index, test_index in outer_cv.split(data_x):
# Split train and test, numerical and categorical features
train_x, test_x = data_x.iloc[train_index], data_x.iloc[test_index]
train_numerical_features, train_categorical_features = splitNumericalCategoricalFeatures(train_x, categorical_feature_colnames)
train_y, test_y = data_y.iloc[train_index], data_y.iloc[test_index]
test_numerical_features, test_categorical_features = splitNumericalCategoricalFeatures(test_x, categorical_feature_colnames)
# Preprocess: impute and normalize
mode_categorical_features = train_categorical_features.mode().iloc[0]
train_x = preprocesFeatures(train_numerical_features, None, train_categorical_features, mode_categorical_features, scaler, "train")
test_x = preprocesFeatures(train_numerical_features, test_numerical_features, test_categorical_features, mode_categorical_features, scaler, "test")
train_x, test_x = train_x.align(test_x, join='outer', axis=1, fill_value=0) # align one-hot encoded columns: categories missing from one split are added and filled with 0
# Compute the number of participants and features, and the proportion of missing-value cells among all features;
# these values do not change between folds
if fold_count == 1:
num_of_participants = train_x.shape[0] + test_x.shape[0]
num_of_features = train_x.shape[1]
nan_ratio = (train_x.isnull().sum().sum() + test_x.isnull().sum().sum()) / ((train_x.shape[0] + test_x.shape[0]) * train_x.shape[1])
# Inner cross validation
clf = GridSearchCV(estimator=pipeline, param_grid=model_hyperparams, cv=inner_cv, scoring="f1_micro")
clf.fit(train_x, train_y.values.ravel())
# Collect results and parameters
best_params = best_params + [clf.best_params_]
pred_y = pred_y + clf.predict(test_x).tolist()
pred_y_prob = pred_y_prob + clf.predict_proba(test_x)[:, 1].tolist()
true_y = true_y + test_y.values.ravel().tolist()
pid = pid + test_y.index.tolist() # each test partition (fold) in the outer cv is a participant (LeaveOneOut cv)
feature_importances_current_fold = getFeatureImportances(model, clf.best_estimator_.steps[1][1], train_x.columns)
feature_importances_all_folds = pd.concat([feature_importances_all_folds, feature_importances_current_fold], sort=False, axis=0)
fold_id.append(fold_count)
fold_count = fold_count + 1
# Step 5. Model evaluation
acc, pre1, recall1, f11, auc, kappa = getMetrics(pred_y, pred_y_prob, true_y)
# Step 6. Save results, parameters, and metrics to CSV files
fold_predictions = pd.DataFrame({"fold_id": fold_id, "pid": pid, "hyperparameters": best_params, "true_y": true_y, "pred_y": pred_y, "pred_y_prob": pred_y_prob})
fold_metrics = pd.DataFrame({"fold_id":[], "accuracy":[], "precision1": [], "recall1": [], "f11": [], "auc": [], "kappa": []}) # placeholder: per-fold metrics are not populated yet, so only the header row is written
overall_results = pd.DataFrame({"num_of_participants": [num_of_participants], "num_of_features": [num_of_features], "nan_ratio": [nan_ratio], "rowsnan_colsnan_days_colsvar_threshold": [rowsnan_colsnan_days_colsvar_threshold], "model": [model], "cv_method": [cv_method], "source": [source], "scaler": [scaler], "day_segment": [day_segment], "summarised": [summarised], "accuracy": [acc], "precision1": [pre1], "recall1": [recall1], "f11": [f11], "auc": [auc], "kappa": [kappa]})
feature_importances_all_folds.insert(loc=0, column='fold_id', value=fold_id)
feature_importances_all_folds.insert(loc=1, column='pid', value=pid)
fold_predictions.to_csv(snakemake.output["fold_predictions"], index=False)
fold_metrics.to_csv(snakemake.output["fold_metrics"], index=False)
overall_results.to_csv(snakemake.output["overall_results"], index=False)
feature_importances_all_folds.to_csv(snakemake.output["fold_feature_importances"], index=False)

@@ -0,0 +1,138 @@
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from imblearn.over_sampling import SMOTE
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import cohen_kappa_score, roc_auc_score
from imblearn.pipeline import Pipeline
# drop columns with zero variance
def dropZeroVarianceCols(data):
if not data.empty:
var_df = data.var()
keep_col = []
for col in var_df.index:
if var_df.loc[col] > 0:
keep_col.append(col)
data_drop_cols_var = data.loc[:, keep_col]
else:
data_drop_cols_var = data
return data_drop_cols_var
# normalize based on all participants: return fitted scaler
def getNormAllParticipantsScaler(features, scaler_flag):
# MinMaxScaler
if scaler_flag == "minmaxscaler":
scaler = MinMaxScaler()
# StandardScaler
elif scaler_flag == "standardscaler":
scaler = StandardScaler()
# RobustScaler
elif scaler_flag == "robustscaler":
scaler = RobustScaler()
else:
# throw exception
raise ValueError("The normalization method is not predefined, please check if the PARAMS_FOR_ANALYSIS.NORMALIZED in config.yaml file is correct.")
scaler.fit(features)
return scaler
# get metrics: accuracy, precision1, recall1, f11, auc, kappa
def getMetrics(pred_y, pred_y_prob, true_y):
acc = accuracy_score(true_y, pred_y)
pre1 = precision_score(true_y, pred_y, average=None, labels=[0,1])[1]
recall1 = recall_score(true_y, pred_y, average=None, labels=[0,1])[1]
f11 = f1_score(true_y, pred_y, average=None, labels=[0,1])[1]
auc = roc_auc_score(true_y, pred_y_prob)
kappa = cohen_kappa_score(true_y, pred_y)
return acc, pre1, recall1, f11, auc, kappa
# get feature importances
def getFeatureImportances(model, clf, cols):
if model == "LogReg":
# Extract the coefficient of the features in the decision function
# Calculate the absolute value
# Normalize it to sum 1
feature_importances = pd.DataFrame(zip(clf.coef_[0],cols), columns=["Value", "Feature"])
feature_importances["Value"] = feature_importances["Value"].abs()/feature_importances["Value"].abs().sum()
elif model == "kNN":
# Feature importances are not defined for the kNN classifier; return an empty dataframe
feature_importances = pd.DataFrame(columns=["Value", "Feature"])
elif model == "SVM":
# Coefficients of the features are only available for the linear kernel
try:
# For linear kernel
# Extract the coefficient of the features in the decision function
# Calculate the absolute value
# Normalize it to sum 1
feature_importances = pd.DataFrame(zip(clf.coef_[0],cols), columns=["Value", "Feature"])
feature_importances["Value"] = feature_importances["Value"].abs()/feature_importances["Value"].abs().sum()
except AttributeError:
# For nonlinear kernel, return an empty dataframe directly
feature_importances = pd.DataFrame(columns=["Value", "Feature"])
elif model == "LightGBM":
# Extract feature_importances_ and normalize it to sum 1
feature_importances = pd.DataFrame(zip(clf.feature_importances_,cols), columns=["Value", "Feature"])
feature_importances["Value"] = feature_importances["Value"]/feature_importances["Value"].sum()
else:
# For the DT, RF, GB, and XGBoost classifiers, extract feature_importances_, which is already normalized.
feature_importances = pd.DataFrame(zip(clf.feature_importances_,cols), columns=["Value", "Feature"])
feature_importances = feature_importances.set_index(["Feature"]).T
return feature_importances
def createPipeline(model):
if model == "LogReg":
from sklearn.linear_model import LogisticRegression
pipeline = Pipeline([
("sampling", SMOTE(sampling_strategy="minority", random_state=0)),
("clf", LogisticRegression(random_state=0))
])
elif model == "kNN":
from sklearn.neighbors import KNeighborsClassifier
pipeline = Pipeline([
("sampling", SMOTE(sampling_strategy="minority", random_state=0)),
("clf", KNeighborsClassifier())
])
elif model == "SVM":
from sklearn.svm import SVC
pipeline = Pipeline([
("sampling", SMOTE(sampling_strategy="minority", random_state=0)),
("clf", SVC(random_state=0, probability=True))
])
elif model == "DT":
from sklearn.tree import DecisionTreeClassifier
pipeline = Pipeline([
("sampling", SMOTE(sampling_strategy="minority", random_state=0)),
("clf", DecisionTreeClassifier(random_state=0))
])
elif model == "RF":
from sklearn.ensemble import RandomForestClassifier
pipeline = Pipeline([
("sampling", SMOTE(sampling_strategy="minority", random_state=0)),
("clf", RandomForestClassifier(random_state=0))
])
elif model == "GB":
from sklearn.ensemble import GradientBoostingClassifier
pipeline = Pipeline([
("sampling", SMOTE(sampling_strategy="minority", random_state=0)),
("clf", GradientBoostingClassifier(random_state=0))
])
elif model == "XGBoost":
from xgboost import XGBClassifier
pipeline = Pipeline([
("sampling", SMOTE(sampling_strategy="minority", random_state=0)),
("clf", XGBClassifier(random_state=0))
])
elif model == "LightGBM":
from lightgbm import LGBMClassifier
pipeline = Pipeline([
("sampling", SMOTE(sampling_strategy="minority", random_state=0)),
("clf", LGBMClassifier(random_state=0))
])
else:
raise ValueError("RAPIDS pipeline only support LogReg, kNN, SVM, DT, RF, GB, XGBoost, and LightGBM algorithms for classification problems.")
return pipeline
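A minimal usage sketch (not part of this commit) of how createPipeline and the MODEL_HYPERPARAMS convention fit together: the "clf__" prefix in each hyperparameter grid addresses the "clf" step of the imblearn Pipeline returned by createPipeline, which is how modeling.py hands the grid to GridSearchCV. The toy data and grid values below are made up, and the sketch assumes the definitions above are importable.

import numpy as np
from sklearn.model_selection import GridSearchCV, LeaveOneOut

pipeline = createPipeline("LogReg")
param_grid = {"clf__C": [0.1, 1, 10], "clf__penalty": ["l2"]}  # "clf__" targets the pipeline's "clf" step

# LeaveOneOut mirrors the CV_METHODS setting; SMOTE inside the pipeline is re-fitted on every training fold
clf = GridSearchCV(estimator=pipeline, param_grid=param_grid, cv=LeaveOneOut(), scoring="f1_micro")

toy_x = np.random.RandomState(0).rand(20, 3)  # 20 samples, 3 features (hypothetical data)
toy_y = np.array([0, 1] * 10)                 # balanced binary target so SMOTE has enough minority samples
clf.fit(toy_x, toy_y)
print(clf.best_params_)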