From 48118f125d837d6815275177ad3f7ef055a35c1b Mon Sep 17 00:00:00 2001 From: junos Date: Fri, 21 Apr 2023 21:34:54 +0200 Subject: [PATCH] Reformat ml_pipeline_regression.py --- exploration/ml_pipeline_regression.py | 355 +++++++++++++++++++------- 1 file changed, 256 insertions(+), 99 deletions(-) diff --git a/exploration/ml_pipeline_regression.py b/exploration/ml_pipeline_regression.py index 98b2e3f..e4ea488 100644 --- a/exploration/ml_pipeline_regression.py +++ b/exploration/ml_pipeline_regression.py @@ -15,64 +15,69 @@ # %% jupyter={"source_hidden": true} # %matplotlib inline -import datetime -import importlib import os import sys import numpy as np -import matplotlib.pyplot as plt import pandas as pd -import seaborn as sns -import yaml -from pyprojroot import here -from sklearn import linear_model, svm, kernel_ridge, gaussian_process -from sklearn.model_selection import LeaveOneGroupOut, cross_val_score, cross_validate -from sklearn.metrics import mean_squared_error, r2_score -from sklearn.impute import SimpleImputer -from sklearn.dummy import DummyRegressor import xgboost as xg -from IPython.core.interactiveshell import InteractiveShell -InteractiveShell.ast_node_interactivity = "all" +from sklearn import gaussian_process, kernel_ridge, linear_model, svm +from sklearn.dummy import DummyRegressor +from sklearn.impute import SimpleImputer +from sklearn.model_selection import LeaveOneGroupOut, cross_validate + +# from IPython.core.interactiveshell import InteractiveShell +# InteractiveShell.ast_node_interactivity = "all" nb_dir = os.path.split(os.getcwd())[0] if nb_dir not in sys.path: sys.path.append(nb_dir) -import machine_learning.features_sensor -import machine_learning.labels -import machine_learning.model - -# %% [markdown] -# # RAPIDS models - -# %% [markdown] -# ## PANAS negative affect +# %% jupyter={"source_hidden": true} +model_input = pd.read_csv( + "../data/intradaily_30_min_all_targets/input_JCQ_job_demand_mean.csv" +) # %% jupyter={"source_hidden": true} -model_input = pd.read_csv("../data/intradaily_30_min_all_targets/input_JCQ_job_demand_mean.csv") - -# %% jupyter={"source_hidden": true} -index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"] -#if "pid" in model_input.columns: +index_columns = [ + "local_segment", + "local_segment_label", + "local_segment_start_datetime", + "local_segment_end_datetime", +] +# if "pid" in model_input.columns: # index_columns.append("pid") model_input.set_index(index_columns, inplace=True) -cv_method = 'half_logo' # logo, half_logo, 5kfold -if cv_method == 'logo': - data_x, data_y, data_groups = model_input.drop(["target", "pid"], axis=1), model_input["target"], model_input["pid"] +cv_method = "half_logo" # logo, half_logo, 5kfold +if cv_method == "logo": + data_x, data_y, data_groups = ( + model_input.drop(["target", "pid"], axis=1), + model_input["target"], + model_input["pid"], + ) else: - model_input['pid_index'] = model_input.groupby('pid').cumcount() - model_input['pid_count'] = model_input.groupby('pid')['pid'].transform('count') + model_input["pid_index"] = model_input.groupby("pid").cumcount() + model_input["pid_count"] = model_input.groupby("pid")["pid"].transform("count") - model_input["pid_index"] = (model_input['pid_index'] / model_input['pid_count'] + 1).round() - model_input["pid_half"] = model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str) + model_input["pid_index"] = ( + model_input["pid_index"] / model_input["pid_count"] + 1 + ).round() 
+ model_input["pid_half"] = ( + model_input["pid"] + "_" + model_input["pid_index"].astype(int).astype(str) + ) - data_x, data_y, data_groups = model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), model_input["target"], model_input["pid_half"] + data_x, data_y, data_groups = ( + model_input.drop(["target", "pid", "pid_index", "pid_half"], axis=1), + model_input["target"], + model_input["pid_half"], + ) # %% jupyter={"source_hidden": true} categorical_feature_colnames = ["gender", "startlanguage"] -additional_categorical_features = [col for col in data_x.columns if "mostcommonactivity" in col or "homelabel" in col] +additional_categorical_features = [ + col for col in data_x.columns if "mostcommonactivity" in col or "homelabel" in col +] categorical_feature_colnames += additional_categorical_features # %% jupyter={"source_hidden": true} @@ -109,7 +114,7 @@ logo.get_n_splits( ) # Defaults to 5 k folds in cross_validate method -if cv_method != 'logo' and cv_method != 'half_logo': +if cv_method != "logo" and cv_method != "half_logo": logo = None # %% jupyter={"source_hidden": true} @@ -120,7 +125,7 @@ sum(data_y.isna()) dummy_regr = DummyRegressor(strategy="mean") # %% jupyter={"source_hidden": true} -imputer = SimpleImputer(missing_values=np.nan, strategy='mean') +imputer = SimpleImputer(missing_values=np.nan, strategy="mean") # %% jupyter={"source_hidden": true} dummy_regressor = cross_validate( @@ -130,12 +135,26 @@ dummy_regressor = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(dummy_regressor['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(dummy_regressor['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(dummy_regressor['test_neg_root_mean_squared_error'])) -print("R2", np.median(dummy_regressor['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(dummy_regressor["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(dummy_regressor["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(dummy_regressor["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(dummy_regressor["test_r2"])) # %% [markdown] # ### Linear Regression @@ -143,7 +162,7 @@ print("R2", np.median(dummy_regressor['test_r2'])) # %% jupyter={"source_hidden": true} lin_reg_rapids = linear_model.LinearRegression() # %% jupyter={"source_hidden": true} -imputer = SimpleImputer(missing_values=np.nan, strategy='mean') +imputer = SimpleImputer(missing_values=np.nan, strategy="mean") # %% jupyter={"source_hidden": true} lin_reg_scores = cross_validate( @@ -153,19 +172,33 @@ lin_reg_scores = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(lin_reg_scores['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(lin_reg_scores['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(lin_reg_scores['test_neg_root_mean_squared_error'])) -print("R2", 
np.median(lin_reg_scores['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(lin_reg_scores["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(lin_reg_scores["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(lin_reg_scores["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(lin_reg_scores["test_r2"])) # %% [markdown] # ### XGBRegressor Linear Regression # %% jupyter={"source_hidden": true} -xgb_r = xg.XGBRegressor(objective ='reg:squarederror', n_estimators = 10) +xgb_r = xg.XGBRegressor(objective="reg:squarederror", n_estimators=10) # %% jupyter={"source_hidden": true} -imputer = SimpleImputer(missing_values=np.nan, strategy='mean') +imputer = SimpleImputer(missing_values=np.nan, strategy="mean") # %% jupyter={"source_hidden": true} xgb_reg_scores = cross_validate( @@ -175,19 +208,33 @@ xgb_reg_scores = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(xgb_reg_scores['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(xgb_reg_scores['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(xgb_reg_scores['test_neg_root_mean_squared_error'])) -print("R2", np.median(xgb_reg_scores['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(xgb_reg_scores["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(xgb_reg_scores["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(xgb_reg_scores["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(xgb_reg_scores["test_r2"])) # %% [markdown] # ### XGBRegressor Pseudo Huber Error Regression # %% jupyter={"source_hidden": true} -xgb_psuedo_huber_r = xg.XGBRegressor(objective ='reg:pseudohubererror', n_estimators = 10) +xgb_psuedo_huber_r = xg.XGBRegressor(objective="reg:pseudohubererror", n_estimators=10) # %% jupyter={"source_hidden": true} -imputer = SimpleImputer(missing_values=np.nan, strategy='mean') +imputer = SimpleImputer(missing_values=np.nan, strategy="mean") # %% jupyter={"source_hidden": true} xgb_psuedo_huber_reg_scores = cross_validate( @@ -197,18 +244,32 @@ xgb_psuedo_huber_reg_scores = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(xgb_psuedo_huber_reg_scores['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(xgb_psuedo_huber_reg_scores['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(xgb_psuedo_huber_reg_scores['test_neg_root_mean_squared_error'])) -print("R2", np.median(xgb_psuedo_huber_reg_scores['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(xgb_psuedo_huber_reg_scores["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(xgb_psuedo_huber_reg_scores["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(xgb_psuedo_huber_reg_scores["test_neg_root_mean_squared_error"]), +) 
+print("R2", np.median(xgb_psuedo_huber_reg_scores["test_r2"])) # %% [markdown] # ### Ridge regression # %% jupyter={"source_hidden": true} -ridge_reg = linear_model.Ridge(alpha=.5) +ridge_reg = linear_model.Ridge(alpha=0.5) # %% tags=[] jupyter={"source_hidden": true} ridge_reg_scores = cross_validate( @@ -218,12 +279,26 @@ ridge_reg_scores = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(ridge_reg_scores['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(ridge_reg_scores['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(ridge_reg_scores['test_neg_root_mean_squared_error'])) -print("R2", np.median(ridge_reg_scores['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(ridge_reg_scores["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(ridge_reg_scores["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(ridge_reg_scores["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(ridge_reg_scores["test_r2"])) # %% [markdown] # ### Lasso @@ -239,12 +314,26 @@ lasso_reg_score = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(lasso_reg_score['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(lasso_reg_score['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(lasso_reg_score['test_neg_root_mean_squared_error'])) -print("R2", np.median(lasso_reg_score['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(lasso_reg_score["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(lasso_reg_score["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(lasso_reg_score["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(lasso_reg_score["test_r2"])) # %% [markdown] # ### Bayesian Ridge @@ -260,12 +349,26 @@ bayesian_ridge_reg_score = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(bayesian_ridge_reg_score['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(bayesian_ridge_reg_score['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(bayesian_ridge_reg_score['test_neg_root_mean_squared_error'])) -print("R2", np.median(bayesian_ridge_reg_score['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(bayesian_ridge_reg_score["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(bayesian_ridge_reg_score["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(bayesian_ridge_reg_score["test_neg_root_mean_squared_error"]), +) 
+print("R2", np.median(bayesian_ridge_reg_score["test_r2"])) # %% [markdown] # ### RANSAC (outlier robust regression) @@ -281,12 +384,26 @@ ransac_reg_scores = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(ransac_reg_scores['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(ransac_reg_scores['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(ransac_reg_scores['test_neg_root_mean_squared_error'])) -print("R2", np.median(ransac_reg_scores['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(ransac_reg_scores["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(ransac_reg_scores["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(ransac_reg_scores["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(ransac_reg_scores["test_r2"])) # %% [markdown] # ### Support vector regression @@ -302,12 +419,25 @@ svr_scores = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(svr_scores['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(svr_scores['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(svr_scores['test_neg_root_mean_squared_error'])) -print("R2", np.median(svr_scores['test_r2'])) +print( + "Negative Mean Squared Error", np.median(svr_scores["test_neg_mean_squared_error"]) +) +print( + "Negative Mean Absolute Error", + np.median(svr_scores["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(svr_scores["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(svr_scores["test_r2"])) # %% [markdown] # ### Kernel Ridge regression @@ -323,12 +453,26 @@ kridge_scores = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(kridge_scores['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(kridge_scores['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(kridge_scores['test_neg_root_mean_squared_error'])) -print("R2", np.median(kridge_scores['test_r2'])) +print( + "Negative Mean Squared Error", + np.median(kridge_scores["test_neg_mean_squared_error"]), +) +print( + "Negative Mean Absolute Error", + np.median(kridge_scores["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(kridge_scores["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(kridge_scores["test_r2"])) # %% [markdown] # ### Gaussian Process Regression @@ -345,11 +489,24 @@ gpr_scores = cross_validate( groups=data_groups, cv=logo, n_jobs=-1, - scoring=('r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_root_mean_squared_error') + 
scoring=( + "r2", + "neg_mean_squared_error", + "neg_mean_absolute_error", + "neg_root_mean_squared_error", + ), ) -print("Negative Mean Squared Error", np.median(gpr_scores['test_neg_mean_squared_error'])) -print("Negative Mean Absolute Error", np.median(gpr_scores['test_neg_mean_absolute_error'])) -print("Negative Root Mean Squared Error", np.median(gpr_scores['test_neg_root_mean_squared_error'])) -print("R2", np.median(gpr_scores['test_r2'])) +print( + "Negative Mean Squared Error", np.median(gpr_scores["test_neg_mean_squared_error"]) +) +print( + "Negative Mean Absolute Error", + np.median(gpr_scores["test_neg_mean_absolute_error"]), +) +print( + "Negative Root Mean Squared Error", + np.median(gpr_scores["test_neg_root_mean_squared_error"]), +) +print("R2", np.median(gpr_scores["test_r2"])) # %%
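
Note on the "half_logo" grouping reformatted above (an illustrative sketch, not part of the patch): the pid_index / pid_count arithmetic tags each participant's rows as belonging to the first or second half of that participant's data, so that LeaveOneGroupOut with data_groups = pid_half holds out half a participant at a time. A toy pandas frame with made-up pids shows the effect:

import pandas as pd

# Toy stand-in for model_input; the pids and values are made up.
toy = pd.DataFrame({"pid": ["p01"] * 4 + ["p02"] * 2, "target": [3, 1, 4, 1, 5, 9]})

toy["pid_index"] = toy.groupby("pid").cumcount()
toy["pid_count"] = toy.groupby("pid")["pid"].transform("count")
toy["pid_index"] = (toy["pid_index"] / toy["pid_count"] + 1).round()
toy["pid_half"] = toy["pid"] + "_" + toy["pid_index"].astype(int).astype(str)

print(toy["pid_half"].tolist())
# ['p01_1', 'p01_1', 'p01_2', 'p01_2', 'p02_1', 'p02_2']
# Each participant now contributes two groups, so each LeaveOneGroupOut split
# holds out half of one participant's rows instead of the whole participant.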
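Also not part of the patch: every estimator in this script goes through the same cross_validate call and the same four median-score printouts, which is why the reformatting touches so many near-identical blocks. A small helper along these lines could express that shared pattern once; the function name, the pipeline wrapping, and the defaults are assumptions, not code from the repository:

import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline


def evaluate_regressor(estimator, data_x, data_y, data_groups, cv):
    """Cross-validate one regressor and print the median test scores."""
    # Wrapping the imputer and the estimator in a pipeline keeps the mean
    # imputation inside each CV fold instead of fitting it on the full data.
    model = make_pipeline(
        SimpleImputer(missing_values=np.nan, strategy="mean"), estimator
    )
    scores = cross_validate(
        model,
        X=data_x,
        y=data_y,
        groups=data_groups,
        cv=cv,
        n_jobs=-1,
        scoring=(
            "r2",
            "neg_mean_squared_error",
            "neg_mean_absolute_error",
            "neg_root_mean_squared_error",
        ),
    )
    for metric in (
        "neg_mean_squared_error",
        "neg_mean_absolute_error",
        "neg_root_mean_squared_error",
        "r2",
    ):
        print(metric, np.median(scores["test_" + metric]))
    return scores


# Example usage with the names defined in the script:
# evaluate_regressor(linear_model.Ridge(alpha=0.5), data_x, data_y, data_groups, logo)

One behavioural difference worth flagging: if the notebook applies its standalone imputer to the full feature matrix before calling cross_validate, the pipeline version above will score slightly differently, because it refits the imputer inside each fold.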