Extend ml pipeline scripts with two additional CV methods.
parent b59798df26
commit 8defb271c9
@@ -72,11 +72,20 @@ sns.heatmap(model_input.sort_values('pid').set_index('pid').isna(), cbar=False)
 
 # %% jupyter={"source_hidden": true}
 index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"]
-#if "pid" in model_input.columns:
-# index_columns.append("pid")
 model_input.set_index(index_columns, inplace=True)
 
-data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+cv_method = '5kfold'
+if cv_method == 'half_logo':
+    data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+else:
+    model_input['pid_index'] = model_input.groupby('pid').cumcount()
+    model_input['pid_count'] = model_input.groupby('pid')['pid'].transform('count')
+
+    model_input["pid_index"] = (model_input['pid_index'] / model_input['pid_count'] + 1).round()
+    model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str)
+
+    data_x, data_y, data_groups = model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), model_input["target"], model_input["pid_half"]
 
 # %% jupyter={"source_hidden": true}
 categorical_feature_colnames = ["gender", "startlanguage"]
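In the `else` branch added above, each participant's rows are numbered with `cumcount`, divided by the participant's row count, and rounded so the first half of the rows maps to 1 and the second half to 2; the resulting `pid_half` labels (for example `p01_1`, `p01_2`) are the groups handed to the new "half_logo" splitter, which can then hold out half of one participant at a time. A minimal, self-contained sketch of that transformation on made-up data (participant IDs and targets are invented for illustration):

import pandas as pd

# Hypothetical toy input: two participants with 4 and 2 rows, respectively.
toy = pd.DataFrame({"pid": ["p01", "p01", "p01", "p01", "p02", "p02"],
                    "target": [1, 0, 1, 1, 0, 1]})

toy["pid_index"] = toy.groupby("pid").cumcount()                      # 0,1,2,3 and 0,1
toy["pid_count"] = toy.groupby("pid")["pid"].transform("count")       # 4 and 2
toy["pid_index"] = (toy["pid_index"] / toy["pid_count"] + 1).round()  # 1.0 for the first half, 2.0 for the second
toy["pid_half"] = toy["pid"] + "_" + toy["pid_index"].astype(int).astype(str)

print(toy["pid_half"].tolist())  # ['p01_1', 'p01_1', 'p01_2', 'p01_2', 'p02_1', 'p02_2']

Note that the rounding sends the exact midpoint (ratio 0.5, i.e. 1.5 before rounding) to 2, so the middle row of an even-length participant falls into the second half.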
@@ -114,6 +123,10 @@ logo.get_n_splits(
     groups=data_groups,
 )
 
+# Defaults to 5 k folds in cross_validate method
+if cv_method != 'logo' and cv_method != 'half_logo':
+    logo = None
+
 # %% jupyter={"source_hidden": true}
 sum(data_y.isna())
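The `logo = None` fallback added above relies on scikit-learn's behaviour that `cross_validate` uses its default 5-fold splitter when `cv=None`, while the 'logo' and 'half_logo' cases keep a `LeaveOneGroupOut` splitter driven by `data_groups`. The snippet below is only a sketch of that wiring, reusing the script's `cv_method`, `data_x`, `data_y`, and `data_groups` names; the `Lasso` estimator and the scoring string are placeholders, not necessarily what the notebooks use:

from sklearn.linear_model import Lasso
from sklearn.model_selection import LeaveOneGroupOut, cross_validate

# Group-aware splitter for 'logo' / 'half_logo'; None lets cross_validate
# fall back to its default 5-fold split for '5kfold'.
logo = LeaveOneGroupOut() if cv_method in ('logo', 'half_logo') else None

scores = cross_validate(
    Lasso(),
    X=data_x,
    y=data_y,
    groups=data_groups,  # ignored by the default KFold, used by LeaveOneGroupOut
    cv=logo,
    scoring='neg_mean_absolute_error',
)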
@@ -50,7 +50,7 @@ import machine_learning.model
 # ## PANAS negative affect
 
 # %% jupyter={"source_hidden": true}
-model_input = pd.read_csv("../data/intradaily_30_min_all_targets/input_PANAS_negative_affect_mean.csv")
+model_input = pd.read_csv("../data/intradaily_30_min_all_targets/input_JCQ_job_demand_mean.csv")
 
 # %% jupyter={"source_hidden": true}
 index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"]
@@ -58,7 +58,17 @@ index_columns = ["local_segment", "local_segment_label", "local_segment_start_da
 # index_columns.append("pid")
 model_input.set_index(index_columns, inplace=True)
 
-data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+cv_method = '5kfold'
+if cv_method == 'half_logo':
+    data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+else:
+    model_input['pid_index'] = model_input.groupby('pid').cumcount()
+    model_input['pid_count'] = model_input.groupby('pid')['pid'].transform('count')
+
+    model_input["pid_index"] = (model_input['pid_index'] / model_input['pid_count'] + 1).round()
+    model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str)
+
+    data_x, data_y, data_groups = model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), model_input["target"], model_input["pid_half"]
 
 # %% jupyter={"source_hidden": true}
 categorical_feature_colnames = ["gender", "startlanguage"]
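`categorical_feature_colnames` marks `gender` and `startlanguage` for categorical handling. One common way to turn such columns into model-ready features is one-hot encoding; the sketch below uses a made-up frame and `pd.get_dummies`, and is not necessarily how the hidden cells that follow encode them:

import pandas as pd

# Hypothetical frame with the two categorical columns named above.
frame = pd.DataFrame({"gender": ["F", "M", "F"],
                      "startlanguage": ["en", "de", "en"],
                      "some_feature": [0.1, 0.7, 0.3]})

categorical_feature_colnames = ["gender", "startlanguage"]
encoded = pd.get_dummies(frame, columns=categorical_feature_colnames, drop_first=True)
print(encoded.columns.tolist())  # ['some_feature', 'gender_M', 'startlanguage_en']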
@@ -98,6 +108,10 @@ logo.get_n_splits(
     groups=data_groups,
 )
 
+# Defaults to 5 k folds in cross_validate method
+if cv_method != 'logo' and cv_method != 'half_logo':
+    logo = None
+
 # %% jupyter={"source_hidden": true}
 sum(data_y.isna())
@@ -53,12 +53,25 @@ import machine_learning.model
 model_input = pd.read_csv("../data/stressfulness_event/input_appraisal_stressfulness_event_mean.csv")
 
 # %% jupyter={"source_hidden": true}
 
 index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"]
-#if "pid" in model_input.columns:
-# index_columns.append("pid")
 model_input.set_index(index_columns, inplace=True)
 
-data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+cv_method = 'half_logo'
+if cv_method == 'logo':
+    data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"]
+else:
+    model_input[(model_input['pid'] == "p037") | (model_input['pid'] == "p064") | (model_input['pid'] == "p092")]
+
+    model_input['pid_index'] = model_input.groupby('pid').cumcount()
+    model_input['pid_count'] = model_input.groupby('pid')['pid'].transform('count')
+
+    model_input["pid_index"] = (model_input['pid_index'] / model_input['pid_count'] + 1).round()
+    model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str)
+
+    data_x, data_y, data_groups = model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), model_input["target"], model_input["pid_half"]
 
 # %% jupyter={"source_hidden": true}
 categorical_feature_colnames = ["gender", "startlanguage"]
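One detail in the hunk above: the bare `model_input[(model_input['pid'] == "p037") | ...]` expression only displays those rows when the cell is run interactively; it does not filter `model_input` itself. If the intent were to actually restrict the input to those (here arbitrarily listed) participants, the selection would have to be assigned back, for example:

# Hypothetical explicit filter; the participant IDs are taken from the
# inspection line above and may not be meant as a real subset.
selected_pids = ["p037", "p064", "p092"]
model_input = model_input[model_input["pid"].isin(selected_pids)]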
@@ -97,12 +110,10 @@ logo.get_n_splits(
     data_y,
     groups=data_groups,
 )
-logo.split(
-    train_x,
-    data_y,
-    groups=data_groups,
-)
 
+# Defaults to 5 k folds in cross_validate method
+if cv_method != 'logo' and cv_method != 'half_logo':
+    logo = None
+
 # %% jupyter={"source_hidden": true}
 sum(data_y.isna())
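For context on the last hunk: `LeaveOneGroupOut.get_n_splits` needs only the `groups` array and returns one fold per distinct group label, and `split` returns a lazy generator, which is why the standalone `logo.split(...)` call deleted above did no work on its own; the folds are only produced when something iterates them, as `cross_validate` does internally. A small sketch with made-up arrays:

import numpy as np
from sklearn.model_selection import LeaveOneGroupOut

# Toy stand-ins for data_x, data_y and data_groups (values are invented).
X = np.arange(12).reshape(6, 2)
y = np.array([0, 1, 0, 1, 0, 1])
groups = np.array(["p01_1", "p01_2", "p01_2", "p02_1", "p02_1", "p02_2"])

logo = LeaveOneGroupOut()
print(logo.get_n_splits(groups=groups))  # 4: one fold per distinct group label

# The generator does nothing until iterated; each iteration yields the
# train/test row indices for one held-out group.
for train_idx, test_idx in logo.split(X, y, groups=groups):
    print(groups[test_idx])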