diff --git a/data/external/participant_files/p01.yaml b/data/external/participant_files/p01.yaml
index 0462a255..04f47f8f 100644
--- a/data/external/participant_files/p01.yaml
+++ b/data/external/participant_files/p01.yaml
@@ -1,9 +1,9 @@
 PHONE:
-  DEVICE_IDS: [70cc5183-97d4-4678-b81e-a34e491e2868,d5bbb2ab-2d60-4e72-a636-17655395c401,93fae5bc-e5a9-4751-b768-fd55c821f126]
-  PLATFORMS: [android,android,android]
-  LABEL: uploader_57312
-  START_DATE: 2020-09-24 11:56:45
-  END_DATE: 2020-10-24 19:19:37
+  DEVICE_IDS: [4b62a655-cbf0-4ac0-a448-06726f45b56a]
+  PLATFORMS: [android]
+  LABEL: uploader_53573
+  START_DATE: 2021-05-21 09:21:24
+  END_DATE: 2021-07-12 17:32:07
 EMPATICA:
   DEVICE_IDS: [empatica1]
   LABEL: test01
diff --git a/src/features/all_cleaning_individual/straw/main.py b/src/features/all_cleaning_individual/straw/main.py
index 93f7d54f..6a1f7402 100644
--- a/src/features/all_cleaning_individual/straw/main.py
+++ b/src/features/all_cleaning_individual/straw/main.py
@@ -23,14 +23,11 @@ def straw_cleaning(sensor_data_files, provider):
     excluded_columns = ['local_segment', 'local_segment_label', 'local_segment_start_datetime', 'local_segment_end_datetime']
 
     # (1) FILTER_OUT THE ROWS THAT DO NOT HAVE THE TARGET COLUMN AVAILABLE
-    # if config['PARAMS_FOR_ANALYSIS']['TARGET']['COMPUTE']:
-    #     target = config['PARAMS_FOR_ANALYSIS']['TARGET']['LABEL'] # get target label from config
-    #     features = features[features['phone_esm_straw_' + target].notna()].reset_index(drop=True)
+    if config['PARAMS_FOR_ANALYSIS']['TARGET']['COMPUTE']:
+        target = config['PARAMS_FOR_ANALYSIS']['TARGET']['LABEL'] # get target label from config
+        features = features[features['phone_esm_straw_' + target].notna()].reset_index(drop=True)
 
-    # (2) REMOVE COLS IF THEIR NAN THRESHOLD IS PASSED (should be <= if even all NaN columns must be preserved - this solution now drops columns with all NaN rows)
-    features = features.loc[:, features.isna().sum() < provider["COLS_NAN_THRESHOLD"] * features.shape[0]]
-
-    # (3.1) QUALITY CHECK (DATA YIELD COLUMN) deletes the rows where E4 or phone data is low quality
+    # (2.1) QUALITY CHECK (DATA YIELD COLUMN) deletes the rows where E4 or phone data is low quality
     phone_data_yield_unit = provider["PHONE_DATA_YIELD_FEATURE"].split("_")[3].lower()
     phone_data_yield_column = "phone_data_yield_rapids_ratiovalidyielded" + phone_data_yield_unit
 
@@ -40,10 +37,10 @@ def straw_cleaning(sensor_data_files, provider):
         raise KeyError(f"RAPIDS provider needs to clean the selected event features based on {phone_data_yield_column} column, please set config[PHONE_DATA_YIELD][PROVIDERS][RAPIDS][COMPUTE] to True and include 'ratiovalidyielded{data_yield_unit}' in [FEATURES].")
 
     if provider["PHONE_DATA_YIELD_RATIO_THRESHOLD"]:
-        features = features[features[phone_data_yield_column] >= provider["PHONE_DATA_YIELD_RATIO_THRESHOLD"]]
+        features = features[features[phone_data_yield_column] >= provider["PHONE_DATA_YIELD_RATIO_THRESHOLD"]].reset_index(drop=True)
 
     if provider["EMPATICA_DATA_YIELD_RATIO_THRESHOLD"]:
-        features = features[features["empatica_data_yield"] >= provider["EMPATICA_DATA_YIELD_RATIO_THRESHOLD"]]
+        features = features[features["empatica_data_yield"] >= provider["EMPATICA_DATA_YIELD_RATIO_THRESHOLD"]].reset_index(drop=True)
 
     # ---> imputation ??
 
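Note on the .reset_index(drop=True) calls added to the two data-yield filters above: boolean row filtering keeps the original index labels, so surviving rows end up with a non-contiguous index. If a later step assigns a freshly built frame back into features (for instance, if the impute() helper further down reconstructs its result with a default RangeIndex), pandas aligns on index labels and would silently reintroduce NaNs. A minimal sketch of that failure mode, with made-up values; the column names are illustrative only:

    import pandas as pd

    df = pd.DataFrame({"data_yield": [0.2, 0.8, 0.9], "x": [1.0, 2.0, 3.0]})

    kept = df[df["data_yield"] >= 0.5].copy()   # surviving index labels: [1, 2]
    fresh = pd.DataFrame({"x": [20.0, 30.0]})   # default RangeIndex: [0, 1]
    kept["x"] = fresh["x"]                      # aligns on labels: row 2 becomes NaN

    kept = df[df["data_yield"] >= 0.5].reset_index(drop=True)
    kept["x"] = fresh["x"]                      # indexes now match: no NaN introduced

Resetting the index right after each filter keeps subsequent label-aligned assignments in this function safe.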
@@ -67,57 +64,73 @@ def straw_cleaning(sensor_data_files, provider):
     # print(features[features['phone_data_yield_rapids_ratiovalidyieldedminutes'] > impute_phone_features['MIN_DATA_YIELDED_MINUTES_TO_IMPUTE']][phone_cols])
 
-    # (3.2) (optional) DOES ROW CONSIST OF ENOUGH NON-NAN VALUES? Possible some of these examples could still pass previous condition but not this one?
+    # (2.2) (optional) DOES ROW CONSIST OF ENOUGH NON-NAN VALUES? Possible some of these examples could still pass previous condition but not this one?
     min_count = math.ceil((1 - provider["ROWS_NAN_THRESHOLD"]) * features.shape[1]) # minimal not nan values in row
     features.dropna(axis=0, thresh=min_count, inplace=True)
 
-    # (4) IMPUTATION: IMPUTE DATA WITH KNN METHOD (TODO: for now only kNN)
-    # - no other input restriction for this method except that rows are full enough and have reasonably high quality as assessed by data yield
-    graph_bf_af(features, "before_knn")
+    # (3) REMOVE COLS IF THEIR NAN THRESHOLD IS PASSED (should be <= if even all NaN columns must be preserved - this solution now drops columns with all NaN rows)
+    esm_cols = features.loc[:, features.columns.str.startswith('phone_esm_straw')] # Get target (esm) columns
 
-    impute_cols = [col for col in features.columns if col not in excluded_columns]
-    features[impute_cols] = impute(features[impute_cols], method="knn")
+    features = features.loc[:, features.isna().sum() < provider["COLS_NAN_THRESHOLD"] * features.shape[0]]
 
-    graph_bf_af(features, "after_knn")
-
-    # (5) REMOVE COLS WHERE VARIANCE IS 0
-    if provider["COLS_VAR_THRESHOLD"]:
-        features.drop(features.std()[features.std() == 0].index.values, axis=1, inplace=True)
-
     # Preserve esm cols if deleted (has to come after drop cols operations)
     for esm in esm_cols:
         if esm not in features:
             features[esm] = esm_cols[esm]
+
+    graph_bf_af(features, "before_knn")
+
+    ## (4) STANDARDIZATION
+    if provider["STANDARDIZATION"]:
+        features.loc[:, ~features.columns.isin(excluded_columns)] = StandardScaler().fit_transform(features.loc[:, ~features.columns.isin(excluded_columns)])
+
+    # (5) IMPUTATION: IMPUTE DATA WITH KNN METHOD (TODO: for now only kNN)
+    # - no other input restriction for this method except that rows are full enough and have reasonably high quality as assessed by data yield
+    impute_cols = [col for col in features.columns if col not in excluded_columns]
+    features[impute_cols] = impute(features[impute_cols], method="knn")
+
+    graph_bf_af(features, "after_knn")
 
+    # (6) REMOVE COLS WHERE VARIANCE IS 0
+    esm_cols = features.loc[:, features.columns.str.startswith('phone_esm_straw')]
+
+    if provider["COLS_VAR_THRESHOLD"]:
+        features.drop(features.std()[features.std() == 0].index.values, axis=1, inplace=True)
+
+    graph_bf_af(features, "before_corr")
 
-    # (6) DROP HIGHLY CORRELATED FEATURES
+    # (7) DROP HIGHLY CORRELATED FEATURES
     drop_corr_features = provider["DROP_HIGHLY_CORRELATED_FEATURES"]
-    if drop_corr_features["COMPUTE"]:
+    if drop_corr_features["COMPUTE"] and features.shape[0] >= 3:
 
         numerical_cols = features.select_dtypes(include=np.number).columns.tolist()
 
         # Remove columns where NaN count threshold is passed
         valid_features = features[numerical_cols].loc[:, features[numerical_cols].isna().sum() < drop_corr_features['MIN_OVERLAP_FOR_CORR_THRESHOLD'] * features[numerical_cols].shape[0]]
 
-        cor_matrix = valid_features.corr(method='spearman').abs()
-        upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
-        to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > drop_corr_features["CORR_THRESHOLD"])]
+        corr_matrix = valid_features.corr().abs()
+        upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
+        to_drop = [column for column in upper.columns if any(upper[column] > drop_corr_features["CORR_THRESHOLD"])]
 
         features.drop(to_drop, axis=1, inplace=True)
 
-    ## (8) STANDARDIZATION
-    if provider["STANDARDIZATION"]:
-        features.loc[:, ~features.columns.isin(excluded_columns)] = StandardScaler().fit_transform(features.loc[:, ~features.columns.isin(excluded_columns)])
+    graph_bf_af(features, "after_corr")
 
-    # (9) VERIFY IF THERE ARE ANY NANS LEFT IN THE DATAFRAME
+    # Preserve esm cols if deleted (has to come after drop cols operations)
+    for esm in esm_cols:
+        if esm not in features:
+            features[esm] = esm_cols[esm]
+
+
+    # (8) VERIFY IF THERE ARE ANY NANS LEFT IN THE DATAFRAME
     if features.isna().any().any():
         raise ValueError
 
     return features
 
 def graph_bf_af(features, phase_name):
     sns.set(rc={"figure.figsize":(16, 8)})
     print(features)
-    sns.heatmap(features.isna(), cbar=False)
+    sns.heatmap(features.isna(), cbar=False) #features.select_dtypes(include=np.number)
     plt.savefig(f'features_nans_{phase_name}.png', bbox_inches='tight')
 
 def impute(df, method='zero'):