# --- # jupyter: # jupytext: # formats: ipynb,py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.13.0 # kernelspec: # display_name: straw2analysis # language: python # name: straw2analysis # --- # %% jupyter={"source_hidden": true} # %matplotlib inline import datetime import importlib import os import sys import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from sklearn import linear_model, svm, naive_bayes, neighbors, tree, ensemble from sklearn.model_selection import LeaveOneGroupOut, cross_validate from sklearn.dummy import DummyClassifier from sklearn.impute import SimpleImputer import xgboost as xg from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" from pathlib import Path nb_dir = os.path.split(os.getcwd())[0] if nb_dir not in sys.path: sys.path.append(nb_dir) import machine_learning.labels import machine_learning.model from machine_learning.helper import run_all_classification_models # %% [markdown] # # RAPIDS models # %% [markdown] # ## Set script's parameters # # %% cv_method_str = 'logo' # logo, halflogo, 5kfold # Cross-validation method (could be regarded as a hyperparameter) n_sl = 1 # Number of largest/smallest accuracies (of particular CV) outputs # %% jupyter={"source_hidden": true} filename = Path("E:/STRAWresults/inputData/stressfulness_event/input_appraisal_stressfulness_event_mean.csv") model_input = pd.read_csv(filename) # %% jupyter={"source_hidden": true} index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"] model_input.set_index(index_columns, inplace=True) model_input['target'].value_counts() # %% jupyter={"source_hidden": true} bins = [-10, -1, 1, 10] # bins for z-scored targets # bins = [0, 1, 4] # bins for stressfulness (1-4) target model_input['target'], edges = pd.cut(model_input.target, bins=bins, labels=['low', 'medium', 'high'], retbins=True, right=True) #['low', 'medium', 'high'] model_input['target'].value_counts(), edges model_input = model_input[model_input['target'] != "medium"] model_input['target'] = model_input['target'].astype(str).apply(lambda x: 0 if x == "low" else 1) model_input['target'].value_counts() if cv_method_str == 'halflogo': model_input['pid_index'] = model_input.groupby('pid').cumcount() model_input['pid_count'] = model_input.groupby('pid')['pid'].transform('count') model_input["pid_index"] = (model_input['pid_index'] / model_input['pid_count'] + 1).round() model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str) data_x, data_y, data_groups = model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), model_input["target"], model_input["pid_half"] else: data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"] # %% jupyter={"source_hidden": true} categorical_feature_colnames = ["gender", "startlanguage"] additional_categorical_features = [col for col in data_x.columns if "mostcommonactivity" in col or "homelabel" in col] categorical_feature_colnames += additional_categorical_features categorical_features = data_x[categorical_feature_colnames].copy() mode_categorical_features = categorical_features.mode().iloc[0] # fillna with mode categorical_features = categorical_features.fillna(mode_categorical_features) # one-hot encoding categorical_features = categorical_features.apply(lambda col: col.astype("category")) if not categorical_features.empty: categorical_features = pd.get_dummies(categorical_features) numerical_features = data_x.drop(categorical_feature_colnames, axis=1) train_x = pd.concat([numerical_features, categorical_features], axis=1) # %% jupyter={"source_hidden": true} cv_method = None # Defaults to 5 k-folds in cross_validate method if cv_method_str == 'logo' or cv_method_str == 'half_logo': cv_method = LeaveOneGroupOut() cv_method.get_n_splits( train_x, data_y, groups=data_groups, ) # %% jupyter={"source_hidden": true} imputer = SimpleImputer(missing_values=np.nan, strategy='median') # %% final_scores = run_all_classification_models(imputer.fit_transform(train_x), data_y, data_groups, cv_method) # %% final_scores.index.name = "metric" final_scores = final_scores.set_index(["method", final_scores.index]) final_scores.to_csv("event_stressfulness_lmh_lh_scores.csv")