Compare commits
2 Commits
c05b047c2d
...
48578d8800
Author | SHA1 | Date |
---|---|---|
junos | 48578d8800 | |
junos | 2fe1b37f55 |
|
@ -0,0 +1,267 @@
|
|||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
import features.esm
|
||||
|
||||
# Questionnaire IDs of the individual Stress Appraisal Measure (SAM) items,
# as they appear in the ESM data's questionnaire_id column.
QUESTIONNAIRE_ID_SAM = {
    "event_stress": 87,
    "event_threat": 88,
    "event_challenge": 89,
    "event_time": 90,
    "event_duration": 91,
    "event_work_related": 92,
    "period_stress": 93,
}
# Inclusive bounds of the SAM questionnaire IDs; used below to filter
# ESM rows down to SAM items only.
QUESTIONNAIRE_ID_SAM_LOW = min(QUESTIONNAIRE_ID_SAM.values())
QUESTIONNAIRE_ID_SAM_HIGH = max(QUESTIONNAIRE_ID_SAM.values())

# Columns that together identify a single ESM session.
GROUP_QUESTIONNAIRES_BY = [
    "participant_id",
    "device_id",
    "esm_session",
]
# Each questionnaire occurs only once within each esm_session on the same device within the same participant.
|
||||
|
||||
|
||||
def extract_stressful_events(df_esm: pd.DataFrame) -> pd.DataFrame:
    """
    Extract all event-related Stress Appraisal Measure features.

    Preprocesses the raw ESM dataframe, keeps only SAM items, and joins the
    per-session features produced by the individual extraction steps
    (threat/challenge means, event presence and stressfulness, work
    relatedness, event time, and event duration) into one wide dataframe.

    Parameters
    ----------
    df_esm: pd.DataFrame
        A raw dataframe of ESM answers.

    Returns
    -------
    df_esm_events: pd.DataFrame
        One row per ESM session (participant_id, device_id, esm_session)
        with all event-related columns.
    """
    # 0. Select only questions from Stress Appraisal Measure.
    df_esm_preprocessed = features.esm.preprocess_esm(df_esm)
    sam_mask = df_esm_preprocessed["questionnaire_id"].between(
        QUESTIONNAIRE_ID_SAM_LOW, QUESTIONNAIRE_ID_SAM_HIGH
    )
    df_esm_sam_clean = features.esm.clean_up_esm(df_esm_preprocessed[sam_mask])

    # 1. Start from the threat and challenge subscale means per session.
    df_esm_events = calculate_threat_challenge_means(df_esm_sam_clean)

    # 2.-5. Join the remaining per-session event columns, one step at a time.
    event_steps = [
        (detect_stressful_event, ["event_present", "event_stressfulness"]),
        (detect_event_work_related, ["event_work_related"]),
        (convert_event_time, ["event_time"]),
        (extract_event_duration, ["event_duration", "event_duration_info"]),
    ]
    for extract_step, step_columns in event_steps:
        df_step = extract_step(df_esm_sam_clean)
        df_esm_events = df_esm_events.join(
            df_step[GROUP_QUESTIONNAIRES_BY + step_columns].set_index(
                GROUP_QUESTIONNAIRES_BY
            )
        )

    return df_esm_events
|
||||
|
||||
|
||||
def calculate_threat_challenge_means(df_esm_sam_clean: pd.DataFrame) -> pd.DataFrame:
    """
    Calculate challenge and threat (two Stress Appraisal Measure subscales) means
    for each ESM session (within participants and devices).
    It creates a grouped dataframe with means in two columns.

    Parameters
    ----------
    df_esm_sam_clean: pd.DataFrame
        A cleaned up dataframe of Stress Appraisal Measure items.

    Returns
    -------
    df_means_wide: pd.DataFrame
        A dataframe of unique ESM sessions (by participants and devices)
        with threat and challenge means.
    """
    threat_id = QUESTIONNAIRE_ID_SAM.get("event_threat")
    challenge_id = QUESTIONNAIRE_ID_SAM.get("event_challenge")

    # Select only threat and challenge assessments for events.
    relevant_items = df_esm_sam_clean[
        df_esm_sam_clean["questionnaire_id"].isin([threat_id, challenge_id])
    ]

    # Mean of each subscale per ESM session; one questionnaire_id per column.
    df_means_wide = pd.pivot_table(
        relevant_items,
        index=["participant_id", "device_id", "esm_session"],
        columns=["questionnaire_id"],
        values=["esm_user_answer_numeric"],
        aggfunc="mean",
    )
    # The pivot produces (value, questionnaire_id) MultiIndex columns;
    # flatten to just the questionnaire IDs and drop the residual axis name.
    df_means_wide.columns = df_means_wide.columns.get_level_values(1)
    df_means_wide.columns.name = None

    return df_means_wide.rename(
        columns={threat_id: "threat_mean", challenge_id: "challenge_mean"}
    )
|
||||
|
||||
|
||||
def detect_stressful_event(df_esm_sam_clean: pd.DataFrame) -> pd.DataFrame:
    """
    Participants were asked: "Was there a particular event that created tension in you?"
    The following options were available:
        0 - No,
        1 - Yes, slightly,
        2 - Yes, moderately,
        3 - Yes, considerably,
        4 - Yes, extremely.
    This function indicates whether there was a stressful event (True/False)
    and how stressful it was on a scale of 1 to 4.

    Parameters
    ----------
    df_esm_sam_clean: pd.DataFrame
        A cleaned up dataframe of Stress Appraisal Measure items.

    Returns
    -------
    df_esm_event_stress: pd.DataFrame
        The same dataframe with two new columns:
        - event_present, indicating whether there was a stressful event at all,
        - event_stressfulness, a numeric answer (1-4) to the single item question.
    """
    event_stress_mask = (
        df_esm_sam_clean["questionnaire_id"]
        == QUESTIONNAIRE_ID_SAM.get("event_stress")
    )
    # Any answer above 0 means an event was present; the raw numeric answer
    # itself is kept as the stressfulness rating.
    return df_esm_sam_clean[event_stress_mask].assign(
        event_present=lambda x: x.esm_user_answer_numeric > 0,
        event_stressfulness=lambda x: x.esm_user_answer_numeric,
    )
|
||||
|
||||
|
||||
def detect_event_work_related(df_esm_sam_clean: pd.DataFrame) -> pd.DataFrame:
    """
    This function simply adds a column indicating the answer to the question:
    "Was/is this event work-related?"

    Parameters
    ----------
    df_esm_sam_clean: pd.DataFrame
        A cleaned up dataframe of Stress Appraisal Measure items.

    Returns
    -------
    df_esm_event_work_related: pd.DataFrame
        The same dataframe with a new column event_work_related (True/False).
    """
    work_related_mask = (
        df_esm_sam_clean["questionnaire_id"]
        == QUESTIONNAIRE_ID_SAM.get("event_work_related")
    )
    # A numeric answer above 0 indicates a work-related event.
    return df_esm_sam_clean[work_related_mask].assign(
        event_work_related=lambda x: x.esm_user_answer_numeric > 0
    )
|
||||
|
||||
|
||||
def convert_event_time(df_esm_sam_clean: pd.DataFrame) -> pd.DataFrame:
    """
    This function only serves to convert the string datetime answer into a real datetime type.
    Errors during this conversion are coerced, meaning that non-datetime answers are assigned Not a Time (NaT).
    NOTE: Since the only available non-datetime answer to this question was "0 - I do not remember",
    the NaTs can be interpreted to mean this.

    Parameters
    ----------
    df_esm_sam_clean: pd.DataFrame
        A cleaned up dataframe of Stress Appraisal Measure items.

    Returns
    -------
    df_esm_event_time: pd.DataFrame
        The same dataframe with a new column event_time of datetime type.
    """
    df_esm_event_time = df_esm_sam_clean[
        df_esm_sam_clean["questionnaire_id"] == QUESTIONNAIRE_ID_SAM.get("event_time")
    ].assign(
        # infer_datetime_format= is deprecated since pandas 2.0 (format
        # inference is the default behaviour), and exact= only has an effect
        # when an explicit format= is given, so both arguments are dropped.
        event_time=lambda x: pd.to_datetime(x.esm_user_answer, errors="coerce")
    )
    return df_esm_event_time
|
||||
|
||||
|
||||
def extract_event_duration(df_esm_sam_clean: pd.DataFrame) -> pd.DataFrame:
    """
    If participants indicated a stressful event, they were asked:
    "How long did this event last? (Answer in hours and minutes)"
    This function extracts this duration time and saves additional answers:
        0 - I do not remember,
        1 - It is still going on.

    Parameters
    ----------
    df_esm_sam_clean: pd.DataFrame
        A cleaned up dataframe of Stress Appraisal Measure items.

    Returns
    -------
    df_esm_event_duration: pd.DataFrame
        The same dataframe with two new columns:
        - event_duration, a time part of a datetime,
        - event_duration_info, giving other options to this question:
            0 - I do not remember,
            1 - It is still going on
    """
    df_esm_event_duration = df_esm_sam_clean[
        df_esm_sam_clean["questionnaire_id"]
        == QUESTIONNAIRE_ID_SAM.get("event_duration")
    ].assign(
        # The last 6 characters of the raw answer are stripped before parsing
        # (presumably a trailing timezone/offset suffix -- TODO confirm
        # against the raw esm_user_answer values).
        event_duration=lambda x: pd.to_datetime(
            x.esm_user_answer.str.slice(start=0, stop=-6), errors="coerce"
        ).dt.time
    )
    # TODO Explore the values recorded in event_duration and possibly fix mistakes.
    # For example, participants reported setting 23:50:00 instead of 00:50:00.

    # For the events that no duration was found (i.e. event_duration = NaT),
    # we can determine whether:
    #    - this event is still going on ("1 - It is still going on")
    #    - the participant couldn't remember its duration ("0 - I do not remember")
    # Generally, these answers were converted to esm_user_answer_numeric in
    # clean_up_esm, but only for the numeric types of questions and answers.
    # Since this was of "datetime" type, convert these specific answers here again.
    df_esm_event_duration["event_duration_info"] = np.nan
    # Assign only the one column that actually changes via .loc, instead of
    # the original `df[mask] = df[mask].assign(...)`, which rewrote EVERY
    # column of the matched rows and is prone to chained-assignment
    # (SettingWithCopy) problems.
    no_duration = df_esm_event_duration["event_duration"].isna()
    df_esm_event_duration.loc[no_duration, "event_duration_info"] = (
        df_esm_event_duration.loc[no_duration, "esm_user_answer"]
        .str.slice(stop=1)
        .astype(int)
    )

    return df_esm_event_duration
|
||||
|
||||
|
||||
# TODO: How many questions about the stressfulness of the period were asked and how does this relate to events?
|
|
@ -0,0 +1,87 @@
|
|||
import pandas as pd
|
||||
import numpy as np
|
||||
from modelling_utils import get_matching_col_names, get_norm_all_participants_scaler
|
||||
|
||||
|
||||
def preprocess_numerical_features(train_numerical_features, test_numerical_features, scaler, flag):
    """Impute and optionally normalize numerical features.

    Missing values are filled with the column means of the TRAIN split, so
    no information from the test split leaks into imputation. When ``scaler``
    is anything other than ``"notnormalized"``, a scaler is fitted on the
    train split and applied to whichever split ``flag`` selects.

    Parameters: train/test feature frames, a scaler flag string, and
    ``flag`` in {"train", "test"} choosing which split to return.
    Raises ValueError for any other ``flag``.
    """
    train_means = train_numerical_features.mean()
    if flag == "train":
        numerical_features = train_numerical_features.fillna(train_means)
    elif flag == "test":
        numerical_features = test_numerical_features.fillna(train_means)
    else:
        raise ValueError("flag should be 'train' or 'test'")

    if scaler != "notnormalized":
        # Fit on train only; transform the selected split.
        fitted_scaler = get_norm_all_participants_scaler(train_numerical_features, scaler)
        numerical_features = pd.DataFrame(
            fitted_scaler.transform(numerical_features),
            index=numerical_features.index,
            columns=numerical_features.columns,
        )

    return numerical_features
|
||||
|
||||
|
||||
def preprocess_categorical_features(categorical_features, mode_categorical_features):
    """Impute categorical features with the train modes and one-hot encode.

    ``mode_categorical_features`` maps column name -> most frequent value;
    missing entries are filled from it before encoding. An empty frame is
    returned unchanged (nothing to encode).
    """
    filled = categorical_features.fillna(mode_categorical_features)
    encoded = filled.apply(lambda col: col.astype("category"))
    if encoded.empty:
        return encoded
    return pd.get_dummies(encoded)
|
||||
|
||||
|
||||
def split_numerical_categorical_features(features, categorical_feature_colnames):
    """Split a feature frame into its numerical and categorical parts.

    Columns listed in ``categorical_feature_colnames`` form the categorical
    part (copied, so later mutation does not touch ``features``); everything
    else forms the numerical part. Returns (numerical, categorical).
    """
    categorical_part = features[categorical_feature_colnames].copy()
    numerical_part = features.drop(categorical_feature_colnames, axis=1)
    return numerical_part, categorical_part
|
||||
|
||||
def preproces_Features(train_numerical_features, test_numerical_features, categorical_features, mode_categorical_features, scaler, flag):
    """Preprocess numerical and categorical features and concatenate them.

    Numerical features are imputed/normalized (train statistics only) and
    categorical features are imputed and one-hot encoded; the two parts are
    concatenated column-wise into a single frame.

    NOTE(review): the name is kept as-is because callers use it, although
    PEP 8 would prefer ``preprocess_features``.
    """
    parts = [
        preprocess_numerical_features(train_numerical_features, test_numerical_features, scaler, flag),
        preprocess_categorical_features(categorical_features, mode_categorical_features),
    ]
    return pd.concat(parts, axis=1)
|
||||
|
||||
##############################################################
# Summary of the workflow
# Step 1. Read parameters and data
# Step 2. Nested cross validation
# Step 3. Model evaluation
# Step 4. Save results, parameters, and metrics to CSV files
##############################################################

# For reproducibility
np.random.seed(0)

# Step 1. Read parameters and data
# Read parameters
# NOTE(review): `snakemake` is injected at runtime when this script runs via a
# Snakemake rule's `script:` directive; it is not an importable module.
model = snakemake.params["model"]
scaler = snakemake.params["scaler"]
cv_method = snakemake.params["cv_method"]
# Substrings that mark a feature column as categorical (matched by name).
categorical_operators = snakemake.params["categorical_operators"]
categorical_colnames_demographic_features = snakemake.params["categorical_demographic_features"]
# Hyperparameter grid for the selected model only.
model_hyperparams = snakemake.params["model_hyperparams"][model]

# Read data and split
data = pd.read_csv(snakemake.input["data"])
index_columns = ["local_segment", "local_segment_label", "local_segment_start_datetime", "local_segment_end_datetime"]
# "pid" is only present in per-participant feature files; include it in the
# index when available so rows stay uniquely identified either way.
if "pid" in data.columns:
    index_columns.append("pid")
data.set_index(index_columns, inplace=True)

# Separate the predictors from the prediction target.
data_x, data_y = data.drop("target", axis=1), data[["target"]]

# Demographic columns count as categorical only in the per-participant setup.
if "pid" in index_columns:
    categorical_feature_colnames = categorical_colnames_demographic_features + get_matching_col_names(categorical_operators, data_x)
else:
    categorical_feature_colnames = get_matching_col_names(categorical_operators, data_x)

# Split train and test, numerical and categorical features
# NOTE(review): train and test are currently the SAME full dataset -- the real
# train/test separation presumably happens inside the nested cross validation
# (Step 2, not shown here); confirm before interpreting any test metrics.
train_x, test_x = data_x, data_x
train_numerical_features, train_categorical_features = split_numerical_categorical_features(train_x, categorical_feature_colnames)
train_y, test_y = data_y, data_y
test_numerical_features, test_categorical_features = split_numerical_categorical_features(test_x, categorical_feature_colnames)

# Preprocess: impute and normalize
# Mode (most frequent value) of each train categorical column, for imputation.
mode_categorical_features = train_categorical_features.mode().iloc[0]
train_x = preproces_Features(train_numerical_features, None, train_categorical_features, mode_categorical_features, scaler, "train")
test_x = preproces_Features(train_numerical_features, test_numerical_features, test_categorical_features, mode_categorical_features, scaler, "test")
# One-hot encoding can yield different dummy columns per split; align both
# frames on the union of columns, filling missing dummies with 0.
train_x, test_x = train_x.align(test_x, join="outer", axis=1, fill_value=0) # in case we get rid off categorical columns
|
@ -0,0 +1,27 @@
|
|||
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
|
||||
|
||||
|
||||
def get_matching_col_names(operators, features):
    """Return the feature column names containing any of the given substrings.

    ``operators`` is an iterable of substrings; ``features`` is any object
    exposing ``.columns`` (e.g. a pandas DataFrame). Column order is
    preserved.
    """
    return [
        col_name
        for col_name in features.columns
        if any(operator in col_name for operator in operators)
    ]
|
||||
|
||||
|
||||
# normalize based on all participants: return fitted scaler
|
||||
# normalize based on all participants: return fitted scaler
def get_norm_all_participants_scaler(features, scaler_flag):
    """Fit the requested scaler on the features of all participants.

    ``scaler_flag`` selects among "minmaxscaler", "standardscaler" and
    "robustscaler"; any other value raises ValueError. Returns the scaler
    already fitted on ``features``.
    """
    scaler_classes = {
        "minmaxscaler": MinMaxScaler,
        "standardscaler": StandardScaler,
        "robustscaler": RobustScaler,
    }
    scaler_class = scaler_classes.get(scaler_flag)
    if scaler_class is None:
        raise ValueError("The normalization method is not predefined, please check if the PARAMS_FOR_ANALYSIS.NORMALIZED in config.yaml file is correct.")
    scaler = scaler_class()
    scaler.fit(features)
    return scaler
|
Loading…
Reference in New Issue