Handle clustering classification the same as other classification models again.
parent c51e0da0f7
commit aa13123136
@@ -14,9 +14,7 @@
 # ---
 # %% jupyter={"source_hidden": true}
 # %matplotlib inline
-import os
-import sys
 from pathlib import Path
 
 import matplotlib.pyplot as plt
 import numpy as np
@@ -27,11 +25,6 @@ from sklearn.model_selection import LeaveOneGroupOut, StratifiedKFold, cross_val
 
+from machine_learning.classification_models import ClassificationModels
 
-nb_dir = os.path.split(os.getcwd())[0]
-if nb_dir not in sys.path:
-    sys.path.append(nb_dir)
-
-
 # %% [markdown]
 # # RAPIDS models
 
@@ -42,10 +35,30 @@ CV_METHOD = "logo" # logo, halflogo, 5kfold
 # Cross-validation method (could be regarded as a hyperparameter)
 N_SL = 1 # Number of largest/smallest accuracies (of particular CV) outputs
 
+# %%
+PATH_BASE = Path("E:/STRAWresults/20230415")
+
+SEGMENT_TYPE = "period"
+print("SEGMENT_TYPE: " + SEGMENT_TYPE)
+SEGMENT_LENGTH = "30_minutes_before"
+print("SEGMENT_LENGTH: " + SEGMENT_LENGTH)
+TARGET_VARIABLE = "appraisal_stressfulness"
+print("TARGET_VARIABLE: " + TARGET_VARIABLE)
+
+if ("appraisal" in TARGET_VARIABLE) and ("stressfulness" in TARGET_VARIABLE):
+    TARGET_VARIABLE += "_"
+    TARGET_VARIABLE += SEGMENT_TYPE
+
+PATH_FULL = PATH_BASE / SEGMENT_LENGTH / ("input_" + TARGET_VARIABLE + "_mean.csv")
+
+model_input = pd.read_csv(PATH_FULL)
+
+if SEGMENT_LENGTH == "daily":
+    DAY_LENGTH = "daily" # or "working"
+    print(DAY_LENGTH)
+    model_input = model_input[model_input["local_segment"].str.contains(DAY_LENGTH)]
+
 # %% jupyter={"source_hidden": true}
-model_input = pd.read_csv(
-    "E:/STRAWresults/20230415/30_minutes_before/input_PANAS_negative_affect_mean.csv"
-)
 index_columns = [
     "local_segment",
     "local_segment_label",
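Note: with the values set in this hunk, the resolved input path is fully determined. A minimal standalone sketch of the new path logic (names and values copied from the diff above):

from pathlib import Path

PATH_BASE = Path("E:/STRAWresults/20230415")
SEGMENT_TYPE = "period"
SEGMENT_LENGTH = "30_minutes_before"
TARGET_VARIABLE = "appraisal_stressfulness"

# Both substrings occur in the target name, so the segment type is appended.
if ("appraisal" in TARGET_VARIABLE) and ("stressfulness" in TARGET_VARIABLE):
    TARGET_VARIABLE += "_" + SEGMENT_TYPE  # -> "appraisal_stressfulness_period"

PATH_FULL = PATH_BASE / SEGMENT_LENGTH / ("input_" + TARGET_VARIABLE + "_mean.csv")
print(PATH_FULL)
# E:/STRAWresults/20230415/30_minutes_before/input_appraisal_stressfulness_period_mean.csv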
@@ -53,10 +66,13 @@ index_columns = [
     "local_segment_end_datetime",
 ]
 
-lime_col = "limesurvey_demand_control_ratio_quartile"
-clust_col = lime_col
+CLUST_COL = "limesurvey_demand_control_ratio_quartile"
+print("CLUST_COL: " + CLUST_COL)
 
-model_input[clust_col].describe()
+BINS = [-1, 0, 4]
+print("BINS: " + str(BINS))
+
+model_input[CLUST_COL].describe()
 
 
 # %% jupyter={"source_hidden": true}
@@ -64,9 +80,9 @@ model_input[clust_col].describe()
 # Filter-out outlier rows by clust_col
 # model_input = model_input[(np.abs(stats.zscore(model_input[clust_col])) < 3)]
 
-uniq = model_input[[clust_col, "pid"]].drop_duplicates().reset_index(drop=True)
+uniq = model_input[[CLUST_COL, "pid"]].drop_duplicates().reset_index(drop=True)
 uniq = uniq.dropna()
-plt.bar(uniq["pid"], uniq[clust_col])
+plt.bar(uniq["pid"], uniq[CLUST_COL])
 
 # %% jupyter={"source_hidden": true}
 # Get clusters by cluster col & and merge the clusters to main df
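The cell that actually assigns clusters sits outside this diff's hunks; only its comment is visible above. For orientation, one plausible shape of that step, assuming k-means over the per-participant CLUST_COL values prepared in uniq (the constructor arguments and the N_CLUSTERS value are assumptions, not taken from this commit):

from sklearn.cluster import KMeans

N_CLUSTERS = 2  # assumed here; the real value is set elsewhere in the notebook

# Cluster participants by their demand/control quartile value.
kmeans = KMeans(n_clusters=N_CLUSTERS, n_init=10, random_state=0)
uniq["cluster"] = kmeans.fit_predict(uniq[[CLUST_COL]])

# Merge the per-participant cluster labels back into the main dataframe so the
# per-cluster loop below can select rows with model_input["cluster"] == k.
model_input = model_input.merge(uniq[["pid", "cluster"]], on="pid", how="left")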
@@ -87,15 +103,14 @@ cmodels = cm.get_cmodels()
 # %% jupyter={"source_hidden": true}
 for k in range(N_CLUSTERS):
     model_input_subset = model_input[model_input["cluster"] == k].copy()
-    bins = [-1, 1, 2, 4] # bins for z-scored targets
     model_input_subset.loc[:, "target"] = pd.cut(
         model_input_subset.loc[:, "target"],
-        bins=bins,
-        labels=["low", "medium", "high"],
+        bins=BINS,
+        labels=["low", "high"],
         right=False,
     ) # ['low', 'medium', 'high']
     model_input_subset["target"].value_counts()
-    model_input_subset = model_input_subset[model_input_subset["target"] != "medium"]
+    # model_input_subset = model_input_subset[model_input_subset["target"] != "medium"]
     model_input_subset["target"] = (
         model_input_subset["target"].astype(str).apply(lambda x: 0 if x == "low" else 1)
     )
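For reference, the new two-bin cut sends z-scored targets in [-1, 0) to "low" and those in [0, 4) to "high" (right=False makes the intervals left-closed), which the loop then encodes as 0/1. A small self-contained illustration with invented target values:

import pandas as pd

targets = pd.Series([-0.8, -0.1, 0.0, 1.7, 3.2])  # invented example values

BINS = [-1, 0, 4]
binned = pd.cut(targets, bins=BINS, labels=["low", "high"], right=False)
print(binned.tolist())  # ['low', 'low', 'high', 'high', 'high']

# 0/1 encoding as in the loop above; note that values outside [-1, 4) become
# NaN, which this string-based encoding would map to 1.
encoded = binned.astype(str).apply(lambda x: 0 if x == "low" else 1)
print(encoded.tolist())  # [0, 0, 1, 1, 1]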
@@ -206,11 +221,17 @@ for k in range(N_CLUSTERS):
 scores = cm.get_total_models_scores(n_clusters=N_CLUSTERS)
 
 # %%
-scores.to_csv(
-    "../presentation/results/PANAS_negative_affect_30min_classification_"
+PATH_OUTPUT = Path("..") / Path("presentation/results")
+path_output_full = PATH_OUTPUT / (
+    TARGET_VARIABLE
+    + "_"
+    + SEGMENT_LENGTH
+    + "_classification_"
     + CV_METHOD
-    + ".csv",
-    index=False,
+    + str(BINS)
+    + "_clust_"
+    + CLUST_COL
+    + str(N_CLUSTERS)
+    + ".csv"
 )
+scores.to_csv(path_output_full, index=False)
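With the settings from this commit, the refactored output cell produces a results filename that encodes the whole run configuration instead of the old hard-coded PANAS string. A sketch of what it yields (N_CLUSTERS is not shown in this diff, so 2 is an assumed value; note that str(BINS) embeds the list's brackets and spaces literally in the name):

from pathlib import Path

TARGET_VARIABLE = "appraisal_stressfulness_period"
SEGMENT_LENGTH = "30_minutes_before"
CV_METHOD = "logo"
BINS = [-1, 0, 4]
CLUST_COL = "limesurvey_demand_control_ratio_quartile"
N_CLUSTERS = 2  # assumed for illustration

PATH_OUTPUT = Path("..") / Path("presentation/results")
path_output_full = PATH_OUTPUT / (
    TARGET_VARIABLE
    + "_"
    + SEGMENT_LENGTH
    + "_classification_"
    + CV_METHOD
    + str(BINS)
    + "_clust_"
    + CLUST_COL
    + str(N_CLUSTERS)
    + ".csv"
)
print(path_output_full)
# ../presentation/results/appraisal_stressfulness_period_30_minutes_before_classification_logo[-1, 0, 4]_clust_limesurvey_demand_control_ratio_quartile2.csv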