Compare commits


No commits in common. "c1bb4ddf0fe9b161e26e3c0339db975418a5483a" and "6592612db73a698005dbe10b8a1acf842a771755" have entirely different histories.

10 changed files with 157 additions and 377 deletions

View File

@@ -15,7 +15,6 @@ dependencies:
- psycopg2
- python-dotenv
- pytz
- pyprojroot
- pyyaml
- seaborn
- scikit-learn
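
Dropping pyprojroot here tracks the removed "from pyprojroot import here" import in machine_learning/pipeline.py below, where the left-hand (c1bb4dd) revision anchors its export folder at the project root. A minimal sketch of that usage, with the path argument taken from pipeline.py:

    from pyprojroot import here

    # here() walks up from the working directory until it finds project-root
    # markers (e.g. a .git directory) and returns an absolute pathlib.Path,
    # so exports do not depend on the current working directory.
    folder = here("machine_learning/intermediate_results/features", warn=True)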

View File

@@ -16,7 +16,6 @@
# %%
# %matplotlib inline
import datetime
import importlib
import os
import sys
@@ -157,25 +156,14 @@ lin_reg_proximity.score(
# %%
from machine_learning import pipeline
# %%
importlib.reload(pipeline)
# %%
with open("../machine_learning/config/minimal_features.yaml", "r") as file:
    sensor_features_params = yaml.safe_load(file)
print(sensor_features_params)
# %%
sensor_features = pipeline.SensorFeatures(**sensor_features_params)
sensor_features.data_types
# %%
sensor_features.set_participants_label("nokia_0000003")
# %%
sensor_features.data_types = ["proximity", "communication"]
sensor_features.participants_usernames = ptcp_2
# %%
sensor_features.get_sensor_data("proximity")
@@ -191,19 +179,12 @@ sensor_features.calculate_features()
# %%
sensor_features.get_features("proximity", "all")
# %%
sensor_features.get_features("communication", "all")
# %%
sensor_features.get_features("all", "all")
# %%
with open("../machine_learning/config/minimal_labels.yaml", "r") as file:
    labels_params = yaml.safe_load(file)
# %%
labels = pipeline.Labels(**labels_params)
labels.participants_usernames = ptcp_2
labels.questionnaires
# %%
@@ -213,25 +194,3 @@ labels.set_labels()
labels.get_labels("PANAS")
# %%
labels.aggregate_labels()
# %%
labels.get_aggregated_labels()
# %%
model_validation = pipeline.ModelValidation(
    sensor_features.get_features("all", "all"),
    labels.get_aggregated_labels(),
    group_variable="participant_id",
    cv_name="loso",
)
model_validation.model = linear_model.LinearRegression()
model_validation.set_cv_method()
# %%
model_validation.cross_validate()
# %%
model_validation.groups
# %%
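
The lines removed from this notebook exercise the full left-hand (c1bb4dd) pipeline API end to end. Condensed into one sketch, assuming that revision's classes and the two minimal_*.yaml configs deleted further down in this compare:

    import yaml
    from sklearn import linear_model
    from machine_learning import pipeline

    # Feature definitions come from the (since deleted) minimal config.
    with open("../machine_learning/config/minimal_features.yaml", "r") as file:
        sensor_features = pipeline.SensorFeatures(**yaml.safe_load(file))
    sensor_features.set_participants_label("nokia_0000003")  # export filename prefix
    sensor_features.set_sensor_data()
    sensor_features.calculate_features()

    with open("../machine_learning/config/minimal_labels.yaml", "r") as file:
        labels = pipeline.Labels(**yaml.safe_load(file))
    labels.set_labels()
    labels.aggregate_labels()

    # Leave-one-subject-out cross-validation of a linear model.
    model_validation = pipeline.ModelValidation(
        sensor_features.get_features("all", "all"),
        labels.get_aggregated_labels(),
        group_variable="participant_id",
        cv_name="loso",
    )
    model_validation.model = linear_model.LinearRegression()
    model_validation.set_cv_method()
    scores = model_validation.cross_validate()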

View File

@@ -13,15 +13,14 @@
# name: straw2analysis
# ---
# %%
import importlib
# %%
# %matplotlib inline
import os
import sys
import matplotlib.pyplot as plt
# %%
import seaborn as sns
nb_dir = os.path.split(os.getcwd())[0]
@@ -29,29 +28,21 @@ if nb_dir not in sys.path:
    sys.path.append(nb_dir)
# %%
from features import communication, helper
# %%
importlib.reload(communication)
from features.communication import *
# %% [markdown]
# # Example of communication data and feature calculation
# %%
df_calls = communication.get_call_data(["nokia_0000003"])
df_calls = get_call_data(["nokia_0000003"])
print(df_calls)
# %%
df_calls = helper.get_date_from_timestamp(df_calls)
communication.count_comms(df_calls, ["date_lj"])
count_comms(df_calls)
# %%
df_sms = communication.get_sms_data(["nokia_0000003"])
df_sms = helper.get_date_from_timestamp(df_sms)
communication.count_comms(df_sms, ["date_lj"])
# %%
communication.calls_sms_features(df_calls, df_sms, ["date_lj"])
df_sms = get_sms_data(["nokia_0000003"])
count_comms(df_sms)
# %% [markdown]
# # Call data
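
The change in this notebook mirrors the API change in features/communication.py below: c1bb4dd addresses the module explicitly and can group features by extra keys such as the Ljubljana-timezone date, while 6592612 star-imports the module and always aggregates per participant. Side by side, assuming the respective signatures:

    # c1bb4dd: per-participant, per-day features via an explicit group_by list.
    from features import communication, helper

    df_calls = communication.get_call_data(["nokia_0000003"])
    df_calls = helper.get_date_from_timestamp(df_calls)  # adds a date_lj column
    communication.count_comms(df_calls, ["date_lj"])

    # 6592612: per-participant features only; no group_by parameter.
    from features.communication import *

    df_calls = get_call_data(["nokia_0000003"])
    count_comms(df_calls)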

View File

@@ -16,7 +16,6 @@
# %%
# %matplotlib inline
import datetime
import importlib
import os
import sys
@@ -33,16 +32,13 @@ import participants.query_db
TZ_LJ = timezone("Europe/Ljubljana")
# %%
from features import helper, proximity
# %%
importlib.reload(proximity)
from features.proximity import *
# %% [markdown]
# # Basic characteristics
# %%
df_proximity_nokia = proximity.get_proximity_data(["nokia_0000003"])
df_proximity_nokia = get_proximity_data(["nokia_0000003"])
print(df_proximity_nokia)
# %%
@@ -57,7 +53,7 @@ df_proximity_nokia.double_proximity.value_counts()
# %%
participants_inactive_usernames = participants.query_db.get_usernames()
df_proximity_inactive = proximity.get_proximity_data(participants_inactive_usernames)
df_proximity_inactive = get_proximity_data(participants_inactive_usernames)
# %%
df_proximity_inactive.double_proximity.describe()
@@ -114,13 +110,3 @@ df_proximity_combinations[
    (df_proximity_combinations[5.0] != 0)
    & (df_proximity_combinations[5.00030517578125] != 0)
]
# %% [markdown]
# # Features
# %%
df_proximity_inactive = helper.get_date_from_timestamp(df_proximity_inactive)
# %%
df_proximity_features = proximity.count_proximity(df_proximity_inactive, ["date_lj"])
display(df_proximity_features)
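
The removed Features section at the bottom of this hunk is the notebook-level version of the per-day proximity features: derive a date from the timestamp, then count "near" readings per participant and day. A condensed sketch, assuming the c1bb4dd signatures shown in features/proximity.py below:

    from features import helper, proximity

    df_prox = proximity.get_proximity_data(["nokia_0000003"])
    df_prox = proximity.recode_proximity(df_prox)      # adds bool_prox_near
    df_prox = helper.get_date_from_timestamp(df_prox)  # adds date_lj
    # In c1bb4dd, participant_id is prepended to group_by internally:
    df_prox_features = proximity.count_proximity(df_prox, ["date_lj"])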

View File

@@ -8,21 +8,14 @@ from setup import db_engine, session
call_types = {1: "incoming", 2: "outgoing", 3: "missed"}
sms_types = {1: "received", 2: "sent"}
FILL_NA_CALLS = {
    "no_calls_all": 0,
    "no_" + call_types.get(1): 0,
    "no_" + call_types.get(2): 0,
    "no_" + call_types.get(3): 0,
    "duration_total_" + call_types.get(1): 0,
    "duration_total_" + call_types.get(2): 0,
    "duration_max_" + call_types.get(1): 0,
    "duration_max_" + call_types.get(2): 0,
    "no_" + call_types.get(1) + "_ratio": 1 / 3,  # Three different types
    "no_" + call_types.get(2) + "_ratio": 1 / 3,
    "no_contacts_calls": 0,
}
FEATURES_CALLS = list(FILL_NA_CALLS.keys())
FEATURES_CALLS = (
    ["no_calls_all"]
    + ["no_" + call_type for call_type in call_types.values()]
    + ["duration_total_" + call_types.get(1), "duration_total_" + call_types.get(2)]
    + ["duration_max_" + call_types.get(1), "duration_max_" + call_types.get(2)]
    + ["no_" + call_types.get(1) + "_ratio", "no_" + call_types.get(2) + "_ratio"]
    + ["no_contacts_calls"]
)
# FEATURES_CALLS =
# ["no_calls_all",
@@ -30,24 +23,19 @@ FEATURES_CALLS = list(FILL_NA_CALLS.keys())
# "duration_total_incoming", "duration_total_outgoing",
# "duration_max_incoming", "duration_max_outgoing",
# "no_incoming_ratio", "no_outgoing_ratio",
# "no_contacts_calls"]
FILL_NA_SMS = {
    "no_sms_all": 0,
    "no_" + sms_types.get(1): 0,
    "no_" + sms_types.get(2): 0,
    "no_" + sms_types.get(1) + "_ratio": 1 / 2,  # Two different types
    "no_" + sms_types.get(2) + "_ratio": 1 / 2,
    "no_contacts_sms": 0,
}
FEATURES_SMS = list(FILL_NA_SMS.keys())
# "no_contacts"]
FEATURES_SMS = (
    ["no_sms_all"]
    + ["no_" + sms_type for sms_type in sms_types.values()]
    + ["no_" + sms_types.get(1) + "_ratio", "no_" + sms_types.get(2) + "_ratio"]
    + ["no_contacts_sms"]
)
# FEATURES_SMS =
# ["no_sms_all",
# "no_received", "no_sent",
# "no_received_ratio", "no_sent_ratio",
# "no_contacts_sms"]
# "no_contacts"]
FEATURES_CALLS_SMS_PROP = [
    "proportion_calls_all",
@@ -57,15 +45,8 @@ FEATURES_CALLS_SMS_PROP = [
    "proportion_calls_missed_sms_received",
]
FILL_NA_CALLS_SMS_PROP = {
    key: 1 / 2 for key in FEATURES_CALLS_SMS_PROP
}  # All of the form of a / (a + b).
FEATURES_CALLS_SMS_ALL = FEATURES_CALLS + FEATURES_SMS + FEATURES_CALLS_SMS_PROP
FILL_NA_CALLS_SMS_ALL = FILL_NA_CALLS | FILL_NA_SMS | FILL_NA_CALLS_SMS_PROP
# As per PEP-584 a union for dicts was implemented in Python 3.9.0.
def get_call_data(usernames: Collection) -> pd.DataFrame:
    """
@@ -156,7 +137,7 @@ def enumerate_contacts(comm_df: pd.DataFrame) -> pd.DataFrame:
    return comm_df
def count_comms(comm_df: pd.DataFrame, group_by=None) -> pd.DataFrame:
def count_comms(comm_df: pd.DataFrame) -> pd.DataFrame:
    """
    Calculate frequencies (and duration) of messages (or calls), grouped by their types.
@@ -164,9 +145,6 @@ def count_comms(comm_df: pd.DataFrame, group_by=None) -> pd.DataFrame:
    ----------
    comm_df: pd.DataFrame
        A dataframe of calls or SMSes.
    group_by: list
        A list of strings, specifying by which parameters to group.
        By default, the features are calculated per participant, but could be "date_lj" etc.
    Returns
    -------
@@ -179,13 +157,11 @@ def count_comms(comm_df: pd.DataFrame, group_by=None) -> pd.DataFrame:
        * the number of messages by type (received, sent), and
        * the number of communication contacts by type.
    """
    if group_by is None:
        group_by = []
    if "call_type" in comm_df:
        data_type = "calls"
        comm_counts = (
            comm_df.value_counts(subset=group_by + ["participant_id", "call_type"])
            .unstack(level="call_type", fill_value=0)
            comm_df.value_counts(subset=["participant_id", "call_type"])
            .unstack()
            .rename(columns=call_types)
            .add_prefix("no_")
        )
@@ -198,17 +174,17 @@ def count_comms(comm_df: pd.DataFrame, group_by=None) -> pd.DataFrame:
        )
        # Ratio of incoming and outgoing calls to all calls.
        comm_duration_total = (
            comm_df.groupby(group_by + ["participant_id", "call_type"])
            comm_df.groupby(["participant_id", "call_type"])
            .sum()["call_duration"]
            .unstack(level="call_type", fill_value=0)
            .unstack()
            .rename(columns=call_types)
            .add_prefix("duration_total_")
        )
        # Total call duration by type.
        comm_duration_max = (
            comm_df.groupby(group_by + ["participant_id", "call_type"])
            comm_df.groupby(["participant_id", "call_type"])
            .max()["call_duration"]
            .unstack(level="call_type", fill_value=0)
            .unstack()
            .rename(columns=call_types)
            .add_prefix("duration_max_")
        )
@@ -226,8 +202,8 @@ def count_comms(comm_df: pd.DataFrame, group_by=None) -> pd.DataFrame:
    elif "message_type" in comm_df:
        data_type = "sms"
        comm_counts = (
            comm_df.value_counts(subset=group_by + ["participant_id", "message_type"])
            .unstack(level="message_type", fill_value=0)
            comm_df.value_counts(subset=["participant_id", "message_type"])
            .unstack()
            .rename(columns=sms_types)
            .add_prefix("no_")
        )
@@ -242,7 +218,7 @@ def count_comms(comm_df: pd.DataFrame, group_by=None) -> pd.DataFrame:
        raise KeyError("The dataframe contains neither call_type nor message_type")
    comm_contacts_counts = (
        enumerate_contacts(comm_df)
        .groupby(group_by + ["participant_id"])
        .groupby(["participant_id"])
        .nunique()["contact_id"]
        .rename("no_contacts_" + data_type)
    )
@@ -294,9 +270,7 @@ def contact_features(comm_df: pd.DataFrame) -> pd.DataFrame:
    return contacts_count
def calls_sms_features(
    df_calls: pd.DataFrame, df_sms: pd.DataFrame, group_by=None
) -> pd.DataFrame:
def calls_sms_features(df_calls: pd.DataFrame, df_sms: pd.DataFrame) -> pd.DataFrame:
    """
    Calculates additional features relating calls and sms data.
@@ -306,9 +280,6 @@ def calls_sms_features(
        A dataframe of calls (return of get_call_data).
    df_sms: pd.DataFrame
        A dataframe of SMSes (return of get_sms_data).
    group_by: list
        A list of strings, specifying by which parameters to group.
        By default, the features are calculated per participant, but could be "date_lj" etc.
    Returns
    -------
@@ -326,20 +297,9 @@ def calls_sms_features(
        * proportion_calls_contacts:
            proportion of calls contacts in total number of communication contacts
    """
    if group_by is None:
        group_by = []
    count_calls = count_comms(df_calls, group_by)
    count_sms = count_comms(df_sms, group_by)
    count_joined = (
        count_calls.merge(
            count_sms,
            how="outer",
            left_index=True,
            right_index=True,
            validate="one_to_one",
        )
        .fillna(0, downcast="infer")
        .assign(
    count_calls = count_comms(df_calls)
    count_sms = count_comms(df_sms)
    count_joined = count_calls.join(count_sms).assign(
        proportion_calls_all=(
            lambda x: x.no_calls_all / (x.no_calls_all + x.no_sms_all)
        ),
@@ -353,11 +313,8 @@ def calls_sms_features(
            lambda x: x.no_outgoing / (x.no_outgoing + x.no_sent)
        ),
        proportion_calls_contacts=(
            lambda x: x.no_contacts_calls
            / (x.no_contacts_calls + x.no_contacts_sms)
            lambda x: x.no_contacts_calls / (x.no_contacts_calls + x.no_contacts_sms)
        )
        # Calculate new features and create additional columns
    )
        .fillna(0.5, downcast="infer")
    )
    return count_joined
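
Two patterns on the c1bb4dd side of this file are worth spelling out: each FEATURES_* list is derived from the keys of a FILL_NA_* dict, so imputation defaults and feature names cannot drift apart, and the per-type dicts are combined with the PEP 584 dict-union operator (Python >= 3.9). Counts default to 0, while ratios of the form a / (a + b) default to the uninformative 1/2 (1/3 for the three call types). A minimal sketch of the idea, using hypothetical, shortened fill dicts:

    import pandas as pd

    # Hypothetical, shortened versions of FILL_NA_CALLS / FILL_NA_SMS.
    fill_na_calls = {"no_calls_all": 0, "no_incoming_ratio": 1 / 3}
    fill_na_sms = {"no_sms_all": 0, "no_received_ratio": 1 / 2}

    features_all = list(fill_na_calls.keys()) + list(fill_na_sms.keys())
    fill_na_all = fill_na_calls | fill_na_sms  # PEP 584 dict union

    # A participant with calls but no SMS data ends up with NaNs after an
    # outer merge; fillna with the combined dict imputes neutral values.
    df = pd.DataFrame([[2.0, 0.5, None, None]], columns=features_all)
    print(df.fillna(value=fill_na_all))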

View File

@@ -5,12 +5,7 @@ import pandas as pd
from config.models import Participant, Proximity
from setup import db_engine, session
FILL_NA_PROXIMITY = {
    "freq_prox_near": 0,
    "prop_prox_near": 1 / 2,  # Of the form of a / (a + b).
}
FEATURES_PROXIMITY = list(FILL_NA_PROXIMITY.keys())
FEATURES_PROXIMITY = ["freq_prox_near", "prop_prox_near"]
def get_proximity_data(usernames: Collection) -> pd.DataFrame:
@@ -83,11 +78,11 @@ def count_proximity(
        A dataframe with the count of "near" proximity values and their relative count.
    """
    if group_by is None:
        group_by = []
        group_by = ["participant_id"]
    if "bool_prox_near" not in df_proximity:
        df_proximity = recode_proximity(df_proximity)
    df_proximity["bool_prox_far"] = ~df_proximity["bool_prox_near"]
    df_proximity_features = df_proximity.groupby(["participant_id"] + group_by).sum()[
    df_proximity_features = df_proximity.groupby(group_by).sum()[
        ["bool_prox_near", "bool_prox_far"]
    ]
    df_proximity_features = df_proximity_features.assign(
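
The behavioral difference in count_proximity is confined to the group_by default: c1bb4dd always prepends participant_id to whatever extra keys the caller passes, whereas 6592612 defaults group_by to ["participant_id"] and expects the caller to list every grouping key. Equivalent daily-feature calls under the two revisions:

    # c1bb4dd: participant_id is added inside count_proximity.
    df_daily = count_proximity(df_proximity, group_by=["date_lj"])

    # 6592612: the caller supplies the complete key list.
    df_daily = count_proximity(df_proximity, group_by=["participant_id", "date_lj"])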

View File

@@ -1,4 +1,4 @@
grouping_variable: [date_lj]
grouping_variable: date_lj
labels:
  PANAS:
    - PA
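
This one-line change follows the constructor signatures in pipeline.py below: the configs are unpacked straight into the classes (e.g. Labels(**yaml.safe_load(file))), so c1bb4dd's "grouping_variable: list" annotation wants the flow-sequence form while 6592612's "grouping_variable: str" wants a plain scalar. The parse difference:

    import yaml

    print(yaml.safe_load("grouping_variable: [date_lj]"))
    # {'grouping_variable': ['date_lj']}

    print(yaml.safe_load("grouping_variable: date_lj"))
    # {'grouping_variable': 'date_lj'}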

View File

@@ -1,6 +0,0 @@
grouping_variable: [date_lj]
features:
  proximity:
    all
  communication:
    all

View File

@@ -1,5 +0,0 @@
grouping_variable: [date_lj]
labels:
  PANAS:
    - PA
    - NA
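
Both minimal_*.yaml files exist only in c1bb4dd; the exploration notebook earlier in this compare loads them relative to the notebook directory and splats the result into the pipeline classes. Parsed, the deleted feature config is a nested dict along these lines:

    import yaml

    config_text = """\
    grouping_variable: [date_lj]
    features:
      proximity:
        all
      communication:
        all
    """
    print(yaml.safe_load(config_text))
    # {'grouping_variable': ['date_lj'],
    #  'features': {'proximity': 'all', 'communication': 'all'}}

    # In the notebook: SensorFeatures(**yaml.safe_load(file))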

View File

@@ -1,25 +1,13 @@
import datetime
import warnings
from collections.abc import Collection
from pathlib import Path
import numpy as np
import pandas as pd
import yaml
from pyprojroot import here
from sklearn import linear_model
from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
from sklearn.model_selection import cross_val_score
import participants.query_db
from features import communication, esm, helper, proximity
from machine_learning import QUESTIONNAIRE_IDS, QUESTIONNAIRE_IDS_RENAME
WARNING_PARTICIPANTS_LABEL = (
"Before calculating features, please set participants label using self.set_participants_label() "
"to be used as a filename prefix when exporting data. "
"The filename will be of the form: %participants_label_%grouping_variable_%data_type.csv"
)
class SensorFeatures:
    def __init__(
@@ -28,22 +16,16 @@
        features: dict,
        participants_usernames: Collection = None,
    ):
        self.grouping_variable_name = grouping_variable
        self.grouping_variable = [grouping_variable]
        self.grouping_variable = grouping_variable
        self.data_types = features.keys()
        self.participants_label: str = ""
        if participants_usernames is None:
            participants_usernames = participants.query_db.get_usernames(
                collection_start=datetime.date.fromisoformat("2020-08-01")
            )
            self.participants_label = "all"
        self.participants_usernames = participants_usernames
        self.df_features_all = pd.DataFrame()
        self.df_proximity = pd.DataFrame()
        self.df_proximity_counts = pd.DataFrame()
@@ -51,28 +33,19 @@
        self.df_sms = pd.DataFrame()
        self.df_calls_sms = pd.DataFrame()
        self.folder = None
        self.filename_prefix = ""
        self.construct_export_path()
        print("SensorFeatures initialized.")
    def set_sensor_data(self):
        print("Querying database ...")
        if "proximity" in self.data_types:
            self.df_proximity = proximity.get_proximity_data(
                self.participants_usernames
            )
            print("Got proximity data from the DB.")
            self.df_proximity = helper.get_date_from_timestamp(self.df_proximity)
            self.df_proximity = proximity.recode_proximity(self.df_proximity)
        if "communication" in self.data_types:
            self.df_calls = communication.get_call_data(self.participants_usernames)
            self.df_calls = helper.get_date_from_timestamp(self.df_calls)
            print("Got calls data from the DB.")
            self.df_sms = communication.get_sms_data(self.participants_usernames)
            self.df_sms = helper.get_date_from_timestamp(self.df_sms)
            print("Got sms data from the DB.")
    def get_sensor_data(self, data_type) -> pd.DataFrame:
        if data_type == "proximity":
@@ -83,41 +56,15 @@
            raise KeyError("This data type has not been implemented.")
    def calculate_features(self):
        print("Calculating features ...")
        if not self.participants_label:
            raise ValueError(WARNING_PARTICIPANTS_LABEL)
        if "proximity" in self.data_types:
            self.df_proximity_counts = proximity.count_proximity(
                self.df_proximity, self.grouping_variable
                self.df_proximity, ["participant_id", self.grouping_variable]
            )
            self.df_features_all = safe_outer_merge_on_index(
                self.df_features_all, self.df_proximity_counts
            )
            print("Calculated proximity features.")
            to_csv_with_settings(
                self.df_proximity, self.folder, self.filename_prefix, data_type="prox"
            )
        if "communication" in self.data_types:
            self.df_calls_sms = communication.calls_sms_features(
                df_calls=self.df_calls,
                df_sms=self.df_sms,
                group_by=self.grouping_variable,
            )
            self.df_features_all = safe_outer_merge_on_index(
                self.df_features_all, self.df_calls_sms
            )
            print("Calculated communication features.")
            to_csv_with_settings(
                self.df_calls_sms, self.folder, self.filename_prefix, data_type="comm"
            )
        self.df_features_all.fillna(
            value=proximity.FILL_NA_PROXIMITY, inplace=True, downcast="infer",
        )
        self.df_features_all.fillna(
            value=communication.FILL_NA_CALLS_SMS_ALL, inplace=True, downcast="infer",
                df_calls=self.df_calls, df_sms=self.df_sms
            )
        # TODO Think about joining dataframes.
    def get_features(self, data_type, feature_names) -> pd.DataFrame:
        if data_type == "proximity":
@@ -128,28 +75,14 @@
            if feature_names == "all":
                feature_names = communication.FEATURES_CALLS_SMS_ALL
            return self.df_calls_sms[feature_names]
        elif data_type == "all":
            return self.df_features_all
        else:
            raise KeyError("This data type has not been implemented.")
    def construct_export_path(self):
        if not self.participants_label:
            warnings.warn(WARNING_PARTICIPANTS_LABEL, UserWarning)
        self.folder = here("machine_learning/intermediate_results/features", warn=True)
        self.filename_prefix = (
            self.participants_label + "_" + self.grouping_variable_name
        )
    def set_participants_label(self, label: str):
        self.participants_label = label
        self.construct_export_path()
class Labels:
    def __init__(
        self,
        grouping_variable: list,
        grouping_variable: str,
        labels: dict,
        participants_usernames: Collection = None,
    ):
@@ -168,15 +101,9 @@ class Labels:
        self.df_esm_interest = pd.DataFrame()
        self.df_esm_clean = pd.DataFrame()
        self.df_esm_means = pd.DataFrame()
        print("Labels initialized.")
    def set_labels(self):
        print("Querying database ...")
        self.df_esm = esm.get_esm_data(self.participants_usernames)
        print("Got ESM data from the DB.")
        self.df_esm_preprocessed = esm.preprocess_esm(self.df_esm)
        print("ESM data preprocessed.")
        if "PANAS" in self.questionnaires:
            self.df_esm_interest = self.df_esm_preprocessed[
                (
@@ -189,7 +116,6 @@
                )
            ]
            self.df_esm_clean = esm.clean_up_esm(self.df_esm_interest)
            print("ESM data cleaned.")
    def get_labels(self, questionnaire):
        if questionnaire == "PANAS":
@@ -197,131 +123,109 @@
        else:
            raise KeyError("This questionnaire has not been implemented as a label.")
    def aggregate_labels(self):
        print("Aggregating labels ...")
        self.df_esm_means = (
            self.df_esm_clean.groupby(
                ["participant_id", "questionnaire_id"] + self.grouping_variable
            )
            .esm_user_answer_numeric.agg("mean")
            .reset_index()
            .rename(columns={"esm_user_answer_numeric": "esm_numeric_mean"})
        )
        self.df_esm_means = (
            self.df_esm_means.pivot(
                index=["participant_id"] + self.grouping_variable,
                columns="questionnaire_id",
                values="esm_numeric_mean",
            )
            .reset_index(col_level=1)
            .rename(columns=QUESTIONNAIRE_IDS_RENAME)
            .set_index(["participant_id"] + self.grouping_variable)
        )
        print("Labels aggregated.")
    def get_aggregated_labels(self):
        return self.df_esm_means
class MachineLearningPipeline:
    def __init__(
        self,
        labels_questionnaire,
        labels_scale,
        data_types,
        participants_usernames=None,
        feature_names=None,
        grouping_variable=None,
    ):
        if participants_usernames is None:
            participants_usernames = participants.query_db.get_usernames(
                collection_start=datetime.date.fromisoformat("2020-08-01")
            )
        self.participants_usernames = participants_usernames
        self.labels_questionnaire = labels_questionnaire
        self.data_types = data_types
        if feature_names is None:
            self.feature_names = []
        self.df_features = pd.DataFrame()
        self.labels_scale = labels_scale
        self.df_labels = pd.DataFrame()
        self.grouping_variable = grouping_variable
        self.df_groups = pd.DataFrame()
class ModelValidation:
    def __init__(self, X, y, group_variable=None, cv_name="loso"):
        self.model = None
        self.cv = None
        self.validation_method = None
        idx_common = X.index.intersection(y.index)
        self.y = y.loc[idx_common, "NA"]
        # TODO Handle the case of multiple labels.
        self.X = X.loc[idx_common]
        self.groups = self.y.index.get_level_values(group_variable)
        self.df_esm = pd.DataFrame()
        self.df_esm_preprocessed = pd.DataFrame()
        self.df_esm_interest = pd.DataFrame()
        self.df_esm_clean = pd.DataFrame()
        self.cv_name = cv_name
        print("ModelValidation initialized.")
        self.df_full_data_daily_means = pd.DataFrame()
        self.df_esm_daily_means = pd.DataFrame()
        self.df_proximity_daily_counts = pd.DataFrame()
    def set_cv_method(self):
        if self.cv_name == "loso":
            self.cv = LeaveOneGroupOut()
            self.cv.get_n_splits(X=self.X, y=self.y, groups=self.groups)
        print("Validation method set.")
    # def get_labels(self):
    #     self.df_esm = esm.get_esm_data(self.participants_usernames)
    #     self.df_esm_preprocessed = esm.preprocess_esm(self.df_esm)
    #     if self.labels_questionnaire == "PANAS":
    #         self.df_esm_interest = self.df_esm_preprocessed[
    #             (
    #                 self.df_esm_preprocessed["questionnaire_id"]
    #                 == QUESTIONNAIRE_IDS.get("PANAS").get("PA")
    #             )
    #             | (
    #                 self.df_esm_preprocessed["questionnaire_id"]
    #                 == QUESTIONNAIRE_IDS.get("PANAS").get("NA")
    #             )
    #         ]
    #         self.df_esm_clean = esm.clean_up_esm(self.df_esm_interest)
    def cross_validate(self):
        print("Running cross validation ...")
        if self.model is None:
            raise TypeError(
                "Please, specify a machine learning model first, by setting the .model attribute. "
                "E.g. self.model = sklearn.linear_model.LinearRegression()"
            )
        if self.cv is None:
            raise TypeError(
                "Please, specify a cross validation method first, by using set_cv_method()."
            )
        if self.X.isna().any().any() or self.y.isna().any().any():
            raise ValueError(
                "NaNs were found in either X or y. Please, check your data before continuing."
            )
        return cross_val_score(
            estimator=self.model,
            X=self.X,
            y=self.y,
            groups=self.groups,
            cv=self.cv,
            n_jobs=-1,
            scoring="r2",
        )
    # def aggregate_daily(self):
    #     self.df_esm_daily_means = (
    #         self.df_esm_clean.groupby(["participant_id", "date_lj", "questionnaire_id"])
    #         .esm_user_answer_numeric.agg("mean")
    #         .reset_index()
    #         .rename(columns={"esm_user_answer_numeric": "esm_numeric_mean"})
    #     )
    #     self.df_esm_daily_means = (
    #         self.df_esm_daily_means.pivot(
    #             index=["participant_id", "date_lj"],
    #             columns="questionnaire_id",
    #             values="esm_numeric_mean",
    #         )
    #         .reset_index(col_level=1)
    #         .rename(columns=QUESTIONNAIRE_IDS_RENAME)
    #         .set_index(["participant_id", "date_lj"])
    #     )
    #     self.df_full_data_daily_means = self.df_esm_daily_means.copy()
    #     if "proximity" in self.data_types:
    #         self.df_proximity_daily_counts = proximity.count_proximity(
    #             self.df_proximity, ["participant_id", "date_lj"]
    #         )
    #         self.df_full_data_daily_means = self.df_full_data_daily_means.join(
    #             self.df_proximity_daily_counts
    #         )
def safe_outer_merge_on_index(left, right):
    if left.empty:
        return right
    elif right.empty:
        return left
    def assign_columns(self):
        self.df_features = self.df_full_data_daily_means[self.feature_names]
        self.df_labels = self.df_full_data_daily_means[self.labels_scale]
        if self.grouping_variable:
            self.df_groups = self.df_full_data_daily_means[self.grouping_variable]
        else:
            return pd.merge(
                left,
                right,
                how="outer",
                left_index=True,
                right_index=True,
                validate="one_to_one",
            self.df_groups = None
    def validate_model(self):
        if self.model is None:
            raise AttributeError(
                "Please, specify a machine learning model first, by setting the .model attribute."
            )
def to_csv_with_settings(
    df: pd.DataFrame, folder: Path, filename_prefix: str, data_type: str
) -> None:
    export_filename = filename_prefix + "_" + data_type + ".csv"
    full_path = folder / export_filename
    df.to_csv(
        path_or_buf=full_path,
        sep=",",
        na_rep="NA",
        header=True,
        index=False,
        encoding="utf-8",
        if self.validation_method is None:
            raise AttributeError(
                "Please, specify a cross validation method first, by setting the .validation_method attribute."
            )
    print("Exported the dataframe to " + str(full_path))
if __name__ == "__main__":
    with open("./config/prox_comm_PANAS_features.yaml", "r") as file:
        sensor_features_params = yaml.safe_load(file)
    sensor_features = SensorFeatures(**sensor_features_params)
    sensor_features.set_sensor_data()
    sensor_features.calculate_features()
    with open("./config/prox_comm_PANAS_labels.yaml", "r") as file:
        labels_params = yaml.safe_load(file)
    labels = Labels(**labels_params)
    labels.set_labels()
    labels.aggregate_labels()
    model_validation = ModelValidation(
        sensor_features.get_features("all", "all"),
        labels.get_aggregated_labels(),
        group_variable="participant_id",
        cv_name="loso",
        cross_val_score(
            estimator=self.model,
            X=self.df_features,
            y=self.df_labels,
            groups=self.df_groups,
            cv=self.validation_method,
            n_jobs=-1,
        )
    model_validation.model = linear_model.LinearRegression()
    model_validation.set_cv_method()
    model_loso_r2 = model_validation.cross_validate()
    print(model_loso_r2)
    print(np.mean(model_loso_r2))
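
The ModelValidation class removed in this compare is a thin wrapper around scikit-learn's leave-one-group-out splitter, with participants as the groups ("leave one subject out", cv_name="loso"). A self-contained sketch of the same validation scheme on synthetic data, so the wiring is visible without the study database:

    import numpy as np
    from sklearn import linear_model
    from sklearn.model_selection import LeaveOneGroupOut, cross_val_score

    rng = np.random.default_rng(0)
    X = rng.normal(size=(30, 4))       # 30 observations, 4 features
    y = X @ rng.normal(size=4) + rng.normal(scale=0.1, size=30)
    groups = np.repeat([0, 1, 2], 10)  # three "participants", 10 days each

    # Each fold holds out every row of one participant, as in cv_name="loso".
    scores = cross_val_score(
        estimator=linear_model.LinearRegression(),
        X=X,
        y=y,
        groups=groups,
        cv=LeaveOneGroupOut(),
        n_jobs=-1,
        scoring="r2",
    )
    print(scores, np.mean(scores))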