From 8defb271c97c07bc50f46baebb53cb8a6694b645 Mon Sep 17 00:00:00 2001
From: Primoz
Date: Mon, 21 Nov 2022 11:23:47 +0100
Subject: [PATCH] Extend ml pipeline scripts with two additional CV methods.

---
 exploration/ml_pipeline_daily.py              | 19 ++++++++++---
 .../ml_pipeline_daily_cleaned_intradaily.py   | 18 +++++++++++--
 .../ml_pipeline_stress_event_cleaned.py       | 27 +++++++++++++------
 3 files changed, 51 insertions(+), 13 deletions(-)

diff --git a/exploration/ml_pipeline_daily.py b/exploration/ml_pipeline_daily.py
index db4ab7e..e12cc1f 100644
--- a/exploration/ml_pipeline_daily.py
+++ b/exploration/ml_pipeline_daily.py
@@ -72,11 +72,20 @@ sns.heatmap(model_input.sort_values('pid').set_index('pid').isna(), cbar=False)
 
 # %% jupyter={"source_hidden": true}
 index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"]
-#if "pid" in model_input.columns:
-#    index_columns.append("pid")
+
 model_input.set_index(index_columns, inplace=True)
 
-data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+cv_method = '5kfold'
+if cv_method == 'logo':
+    data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+else:
+    model_input['pid_index'] = model_input.groupby('pid').cumcount()
+    model_input['pid_count'] = model_input.groupby('pid')['pid'].transform('count')
+
+    model_input["pid_index"] = (model_input['pid_index'] / model_input['pid_count'] + 1).round()
+    model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str)
+
+    data_x, data_y, data_groups = model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), model_input["target"], model_input["pid_half"]
 
 # %% jupyter={"source_hidden": true}
 categorical_feature_colnames = ["gender", "startlanguage"]
@@ -114,6 +123,10 @@ logo.get_n_splits(
     groups=data_groups,
 )
 
+# With cv=None, cross_validate defaults to 5-fold CV
+if cv_method != 'logo' and cv_method != 'half_logo':
+    logo = None
+
 # %% jupyter={"source_hidden": true}
 sum(data_y.isna())
 
diff --git a/exploration/ml_pipeline_daily_cleaned_intradaily.py b/exploration/ml_pipeline_daily_cleaned_intradaily.py
index 3e27620..dccdfd8 100644
--- a/exploration/ml_pipeline_daily_cleaned_intradaily.py
+++ b/exploration/ml_pipeline_daily_cleaned_intradaily.py
@@ -50,7 +50,7 @@ import machine_learning.model
 # ## PANAS negative affect
 
 # %% jupyter={"source_hidden": true}
-model_input = pd.read_csv("../data/intradaily_30_min_all_targets/input_PANAS_negative_affect_mean.csv")
+model_input = pd.read_csv("../data/intradaily_30_min_all_targets/input_JCQ_job_demand_mean.csv")
 
 # %% jupyter={"source_hidden": true}
 index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"]
@@ -58,7 +58,17 @@ index_columns = ["local_segment", "local_segment_label", "local_segment_start_da
 # index_columns.append("pid")
 model_input.set_index(index_columns, inplace=True)
 
-data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+cv_method = '5kfold'
+if cv_method == 'logo':
+    data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+else:
+    model_input['pid_index'] = model_input.groupby('pid').cumcount()
+    model_input['pid_count'] = model_input.groupby('pid')['pid'].transform('count')
+
+    model_input["pid_index"] = (model_input['pid_index'] / model_input['pid_count'] + 1).round()
+    model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str)
+
+    data_x, data_y, data_groups = model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), model_input["target"], model_input["pid_half"]
 
 # %% jupyter={"source_hidden": true}
 categorical_feature_colnames = ["gender", "startlanguage"]
@@ -98,6 +108,10 @@ logo.get_n_splits(
     groups=data_groups,
 )
 
+# With cv=None, cross_validate defaults to 5-fold CV
+if cv_method != 'logo' and cv_method != 'half_logo':
+    logo = None
+
 # %% jupyter={"source_hidden": true}
 sum(data_y.isna())
 
diff --git a/exploration/ml_pipeline_stress_event_cleaned.py b/exploration/ml_pipeline_stress_event_cleaned.py
index 3b6cd6d..9bef7f9 100644
--- a/exploration/ml_pipeline_stress_event_cleaned.py
+++ b/exploration/ml_pipeline_stress_event_cleaned.py
@@ -53,12 +53,25 @@ import machine_learning.model
 model_input = pd.read_csv("../data/stressfulness_event/input_appraisal_stressfulness_event_mean.csv")
 
 # %% jupyter={"source_hidden": true}
+
 index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"]
-#if "pid" in model_input.columns:
-#    index_columns.append("pid")
+
 model_input.set_index(index_columns, inplace=True)
 
-data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+cv_method = 'half_logo'
+if cv_method == 'logo':
+    data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+else:
+
+    model_input[(model_input['pid'] == "p037") | (model_input['pid'] == "p064") | (model_input['pid'] == "p092")]  # spot-check a few participants (display-only, no assignment)
+
+    model_input['pid_index'] = model_input.groupby('pid').cumcount()
+    model_input['pid_count'] = model_input.groupby('pid')['pid'].transform('count')
+
+    model_input["pid_index"] = (model_input['pid_index'] / model_input['pid_count'] + 1).round()
+    model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str)
+
+    data_x, data_y, data_groups = model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), model_input["target"], model_input["pid_half"]
 
 # %% jupyter={"source_hidden": true}
 categorical_feature_colnames = ["gender", "startlanguage"]
@@ -97,12 +110,10 @@ logo.get_n_splits(
     data_y,
     groups=data_groups,
 )
-logo.split(
-    train_x,
-    data_y,
-    groups=data_groups,
-)
 
+# With cv=None, cross_validate defaults to 5-fold CV
+if cv_method != 'logo' and cv_method != 'half_logo':
+    logo = None
 
 # %% jupyter={"source_hidden": true}
 sum(data_y.isna())
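
For reference, a minimal sketch of the "half_logo" grouping introduced above, run on a made-up two-participant frame (the pids and targets are invented for illustration). Each participant's rows are split into a first and a second half, yielding pseudo-participants such as p01_1 and p01_2 that a leave-one-group-out splitter can treat as separate groups:

import pandas as pd

model_input = pd.DataFrame({
    "pid": ["p01"] * 4 + ["p02"] * 3,
    "target": [0, 1, 0, 1, 1, 0, 1],
})

# Rank each row within its participant (0-based) and count rows per participant.
model_input["pid_index"] = model_input.groupby("pid").cumcount()
model_input["pid_count"] = model_input.groupby("pid")["pid"].transform("count")

# Rows in the first half of a participant's sequence round to 1, the rest to 2.
model_input["pid_index"] = (model_input["pid_index"] / model_input["pid_count"] + 1).round()
model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str)

print(model_input[["pid", "pid_half"]])
# p01 rows -> p01_1, p01_1, p01_2, p01_2; p02 rows -> p02_1, p02_1, p02_2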
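And a sketch of how the cv_method switch feeds scikit-learn's cross_validate, under stated assumptions: the data is synthetic and LinearRegression is a stand-in estimator, while the actual scripts build data_x/data_y/data_groups as in the hunks above. The key detail is the cv argument: a LeaveOneGroupOut splitter for 'logo' and 'half_logo', and cv=None otherwise, which scikit-learn resolves to 5-fold KFold, i.e. the behavior the added "logo = None" lines rely on:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import LeaveOneGroupOut, cross_validate

rng = np.random.default_rng(0)
data_x = rng.normal(size=(40, 3))
data_y = rng.normal(size=40)
# With cv_method == 'half_logo', these would be the pid_half labels.
data_groups = np.repeat(["p01_1", "p01_2", "p02_1", "p02_2"], 10)

cv_method = 'half_logo'  # one of 'logo', 'half_logo', '5kfold'
logo = LeaveOneGroupOut() if cv_method in ('logo', 'half_logo') else None

scores = cross_validate(
    LinearRegression(),
    X=data_x,
    y=data_y,
    groups=data_groups,  # ignored by the default KFold when cv is None
    cv=logo,             # cv=None falls back to 5-fold KFold
    scoring="neg_mean_absolute_error",
)
print(scores["test_score"])  # one score per left-out group (or per fold)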