From fb577bc9adf8c970f0b74464dbeacf1b8982b1e2 Mon Sep 17 00:00:00 2001 From: Primoz Date: Fri, 22 Jul 2022 15:31:30 +0000 Subject: [PATCH] Squashed commit of the following: commit 43ecc243cb62bb31eed85cb477ca4131555c7fe7 Author: Primoz Date: Fri Jul 22 15:26:09 2022 +0000 Adding TODO comments commit 2df1ebf90c3a93812b112b8ed0ee4e23cd74533f Author: Primoz Date: Thu Jul 21 13:59:23 2022 +0000 README update commit 5182c2b16dff3537aad42984b8ea5214743cdb32 Author: Primoz Date: Thu Jul 21 11:03:01 2022 +0000 Few corrections for all_cleaning commit 3d9254c1b3bed6e95e631d4e0402548830a19534 Author: Primoz Date: Thu Jul 21 10:28:05 2022 +0000 Adding the min overlap for corr threshold and preservation of esm cols. commit e27c49cc8fa4c51f9fe8e593a8d25e9a032ab393 Author: Primoz Date: Thu Jul 21 09:02:00 2022 +0000 Commenting and cleaning. commit 31a47a5ee4569264e39d7c445525a6e64bb7700a Author: Primoz Date: Wed Jul 20 13:49:22 2022 +0000 Environment version change. commit 5b274ed8993f58e783bda6d82fce936764209c28 Author: Primoz Date: Tue Jul 19 16:10:07 2022 +0000 Enabled cleaning for all participants + standardization files. commit 203fdb31e0f3c647ef8c8a60cb9531831b7ab924 Author: Primoz Date: Tue Jul 19 14:14:51 2022 +0000 Features cleaning fixes after testing. Visualization script for phone features values. commit 176178d73b154c30b9eb9eb4a67514f00d6a924e Author: Primoz Date: Tue Jul 19 09:05:14 2022 +0000 Revert "Necessary config changes." This reverts commit 6ec1ef50430d2e1f5ce4670d505d5e84ac47f0a0. commit 26ea6512c9d512f95837e7b047fe510c1d196403 Author: Primoz Date: Mon Jul 18 13:19:47 2022 +0000 Adding cleaning function condition and cleaning functionality. commit 575c29eef9c21e6f2d7832871e73bc0941643734 Author: Primoz Date: Mon Jul 18 12:51:56 2022 +0000 Translation of the cleaning individual RAPIDS function from R to py. commit 6ec1ef50430d2e1f5ce4670d505d5e84ac47f0a0 Author: Primoz Date: Mon Jul 18 12:02:18 2022 +0000 Necessary config changes. 
commit b5669f51612fbd8378848615d639677851ab032f Author: Primoz Date: Fri Jul 15 15:26:00 2022 +0000 Modified snakemake rule to dynamically choose script extension. commit 66636be1e8ae4828228b37c59b9df1faf3fc3d3d Author: Primoz Date: Fri Jul 15 14:43:08 2022 +0000 Trying to modify the snakefile rule to execute scripts in two languages depending on the provider. commit 574778b00f3cbb368ef4bc74de15cf5070c65ea9 Author: Primoz Date: Fri Jul 15 09:49:41 2022 +0000 gitignore: adding required files so that RAPIDS can be run successfully. commit 71018ab178256970535e78961602ab8c7f0ebb14 Author: Primoz Date: Fri Jul 15 08:34:19 2022 +0000 Standardization bug fixes commit 6253c470a624e6bfbb02e0c453b652452eb2dbbc Author: Primoz Date: Thu Jul 14 15:28:02 2022 +0000 Separate rules for empatica vs. nonempatica standardization. Parameter in config that controls the creation of standardized merged files for individual and all participants. commit 90f902778565e0896d3bae22ae8551be8b487e67 Author: Primoz Date: Tue Jul 12 14:23:03 2022 +0000 Preparing for final csvs' standardization. commit d25dde3998786a9a582f5cda544ee104386778f9 Author: Primoz Date: Mon Jul 11 12:08:47 2022 +0000 Revert "Changes in config to be reverted." This reverts commit bea7608e7095021fb7c53a9afa07074448fe4313. commit 6b23e70857e63deda98eb98d190af9090626c84b Author: Primoz Date: Mon Jul 11 12:08:26 2022 +0000 Enabled standardization for rest (previously active) phone features. Testing still needed. commit 8ec58a6f34ba3d42e5cc71d26e6d91837472ca5f Author: Primoz Date: Mon Jul 11 09:07:55 2022 +0000 Enabled standardization for phone calls. All steps completed and tested. commit bea7608e7095021fb7c53a9afa07074448fe4313 Author: Primoz Date: Mon Jul 11 07:47:51 2022 +0000 Changes in config to be reverted. commit 4e84ca0e51bf709bff56fd09437b95310ec6bedd Author: Primoz Date: Fri Jul 8 14:11:24 2022 +0000 Standardization for the rest of the features. 
commit cc581aa788e3d5c17131af8f3d5dd6b0c3b5aff7 Author: Primoz Date: Fri Jul 8 14:11:08 2022 +0000 README update again --- README.md | 1 - config.yaml | 14 ++++---- credentials.yaml | 6 ---- .../all_cleaning_individual/straw/main.py | 35 +++++++++++++------ .../all_cleaning_overall/straw/main.py | 19 +++++++--- 5 files changed, 46 insertions(+), 29 deletions(-) delete mode 100644 credentials.yaml diff --git a/README.md b/README.md index 23a00b27..8176c591 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,6 @@ Then follow the "cr-features module" section below. This RAPIDS extension uses cr-features library accessible [here](https://repo.ijs.si/matjazbostic/calculatingfeatures). To use cr-features library: -- For now, use the "modifications_for_rapids" branch to get the newest version of cr-features that is functional for RAPIDS-STRAW analysis. - Follow the installation instructions in the [README.md](https://repo.ijs.si/matjazbostic/calculatingfeatures/-/blob/master/README.md). diff --git a/config.yaml b/config.yaml index 1bac3f95..bcd4de4e 100644 --- a/config.yaml +++ b/config.yaml @@ -680,11 +680,11 @@ ALL_CLEANING_INDIVIDUAL: DATA_YIELD_FEATURE: RATIO_VALID_YIELDED_HOURS # RATIO_VALID_YIELDED_HOURS or RATIO_VALID_YIELDED_MINUTES DATA_YIELD_RATIO_THRESHOLD: 0.3 # set to 0 to disable DROP_HIGHLY_CORRELATED_FEATURES: - COMPUTE: False + COMPUTE: True MIN_OVERLAP_FOR_CORR_THRESHOLD: 0.5 CORR_THRESHOLD: 0.95 SRC_SCRIPT: src/features/all_cleaning_individual/rapids/main.R - STRAW: + STRAW: # currently the same as RAPIDS provider with a change in selecting the imputation type + is not considering MIN_OVERLAP_FOR_CORR_THRESHOLD param and does not have special treatment for phone_esm (see RAPIDS script) COMPUTE: True IMPUTE_PHONE_SELECTED_EVENT_FEATURES: COMPUTE: True @@ -715,11 +715,11 @@ ALL_CLEANING_OVERALL: DATA_YIELD_FEATURE: RATIO_VALID_YIELDED_HOURS # RATIO_VALID_YIELDED_HOURS or RATIO_VALID_YIELDED_MINUTES DATA_YIELD_RATIO_THRESHOLD: 0.3 # set to 0 to disable 
DROP_HIGHLY_CORRELATED_FEATURES: - COMPUTE: False + COMPUTE: True MIN_OVERLAP_FOR_CORR_THRESHOLD: 0.5 CORR_THRESHOLD: 0.95 SRC_SCRIPT: src/features/all_cleaning_overall/rapids/main.R - STRAW: + STRAW: # currently the same as RAPIDS provider with a change in selecting the imputation type + is not considering MIN_OVERLAP_FOR_CORR_THRESHOLD param COMPUTE: True IMPUTE_PHONE_SELECTED_EVENT_FEATURES: COMPUTE: True @@ -741,7 +741,7 @@ ALL_CLEANING_OVERALL: # Z-score standardization # ######################################################################################################################## -STANDARDIZATION: +STANDARDIZATION: # Standardization for both providers is executed if only one of two providers is marked COMPUTE: TRUE MERGE_ALL: True # Creates the joint standardized file for each participant and all participants. Similar to merge_sensor_features_for_all_participants rule PROVIDERS: CR: @@ -759,7 +759,7 @@ STANDARDIZATION: PARAMS_FOR_ANALYSIS: BASELINE: - COMPUTE: False + COMPUTE: True FOLDER: data/external/baseline CONTAINER: [results-survey637813_final.csv, # Slovenia results-survey358134_final.csv, # Belgium 1 @@ -770,5 +770,5 @@ PARAMS_FOR_ANALYSIS: CATEGORICAL_FEATURES: [gender] TARGET: - COMPUTE: False + COMPUTE: True LABEL: PANAS_negative_affect_mean diff --git a/credentials.yaml b/credentials.yaml deleted file mode 100644 index 3a1c51e0..00000000 --- a/credentials.yaml +++ /dev/null @@ -1,6 +0,0 @@ -PSQL_STRAW: - database: staw - user: staw_db - password: kizi-x2yf-mate - host: 212.235.208.113 - port: 5432 diff --git a/src/features/all_cleaning_individual/straw/main.py b/src/features/all_cleaning_individual/straw/main.py index 2c991afd..f82a355d 100644 --- a/src/features/all_cleaning_individual/straw/main.py +++ b/src/features/all_cleaning_individual/straw/main.py @@ -6,12 +6,17 @@ def straw_cleaning(sensor_data_files, provider): features = pd.read_csv(sensor_data_files["sensor_data"][0]) + # TODO: reorder the cleaning steps so it makes sense 
for the analysis + # TODO: add conditions that differentiates cleaning steps for standardized and nonstandardized features, for this + # the snakemake rules will also have to come with additional parameter (in rules/features.smk) + # Impute selected features event impute_phone_features = provider["IMPUTE_PHONE_SELECTED_EVENT_FEATURES"] if impute_phone_features["COMPUTE"]: if not 'phone_data_yield_rapids_ratiovalidyieldedminutes' in features.columns: raise KeyError("RAPIDS provider needs to impute the selected event features based on phone_data_yield_rapids_ratiovalidyieldedminutes column, please set config[PHONE_DATA_YIELD][PROVIDERS][RAPIDS][COMPUTE] to True and include 'ratiovalidyieldedminutes' in [FEATURES].") + # TODO: if the type of the imputation will vary for different groups of features make conditional imputations here phone_cols = [col for col in features if \ col.startswith('phone_applications_foreground_rapids_') or col.startswith('phone_battery_rapids_') or @@ -22,7 +27,7 @@ def straw_cleaning(sensor_data_files, provider): col.startswith('phone_wifi_')] mask = features['phone_data_yield_rapids_ratiovalidyieldedminutes'] > impute_phone_features['MIN_DATA_YIELDED_MINUTES_TO_IMPUTE'] - features.loc[mask, phone_cols] = impute(features[mask][phone_cols], method=impute_phone_features["TYPE"]) + features.loc[mask, phone_cols] = impute(features[mask][phone_cols], method=impute_phone_features["TYPE"].lower()) # Drop rows with the value of data_yield_column less than data_yield_ratio_threshold data_yield_unit = provider["DATA_YIELD_FEATURE"].split("_")[3].lower() @@ -33,40 +38,50 @@ def straw_cleaning(sensor_data_files, provider): features = features[features[data_yield_column] >= provider["DATA_YIELD_RATIO_THRESHOLD"]] - # Remove cols if threshold of NaN values is passed - features = features.loc[:, features.isna().sum() < provider["COLS_NAN_THRESHOLD"] * features.shape[0]] + esm_cols = features.loc[:, features.columns.str.startswith('phone_esm')] # For later 
preservation of esm_cols + # Remove cols if threshold of NaN values is passed + features = features.loc[:, features.isna().sum() < provider["COLS_NAN_THRESHOLD"] * features.shape[0]] + # Remove cols where variance is 0 if provider["COLS_VAR_THRESHOLD"]: features.drop(features.std()[features.std() == 0].index.values, axis=1, inplace=True) + # Preserve esm cols if deleted (has to come after drop cols operations) + for esm in esm_cols: + if esm not in features: + features[esm] = esm_cols[esm] + # Drop highly correlated features - To-Do še en thershold var, ki je v config + kako se tretirajo NaNs? drop_corr_features = provider["DROP_HIGHLY_CORRELATED_FEATURES"] if drop_corr_features["COMPUTE"]: + numerical_cols = features.select_dtypes(include=np.number).columns.tolist() - cor_matrix = features[numerical_cols].corr(method='spearman').abs() + # Remove columns where NaN count threshold is passed + valid_features = features[numerical_cols].loc[:, features[numerical_cols].isna().sum() < drop_corr_features['MIN_OVERLAP_FOR_CORR_THRESHOLD'] * features[numerical_cols].shape[0]] + cor_matrix = valid_features.corr(method='spearman').abs() upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool)) - to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > drop_corr_features["CORR_THRESHOLD"])] - # Tukaj je še neka validacija s thresholdom, ampak ne razumem R kode "valid_pairs" features.drop(to_drop, axis=1, inplace=True) # Remove rows if threshold of NaN values is passed - min_count = math.ceil((1 - provider["ROWS_NAN_THRESHOLD"]) * features.shape[1]) # min not nan values in row + min_count = math.ceil((1 - provider["ROWS_NAN_THRESHOLD"]) * features.shape[1]) # minimal not nan values in row features.dropna(axis=0, thresh=min_count, inplace=True) return features def impute(df, method='zero'): - df.loc[:, df.isna().all()] = df.loc[:, df.isna().all()].fillna(0) # if column contains only NaN values impute it with 0 + + def 
k_nearest(df): # TODO: if needed, implement k-nearest imputation / interpolation + pass + return { # rest of the columns should be imputed with the selected method 'zero': df.fillna(0), 'mean': df.fillna(df.mean()), 'median': df.fillna(df.median()), - 'k-nearest': None # To-Do + 'k-nearest': k_nearest(df) }[method] - diff --git a/src/features/all_cleaning_overall/straw/main.py b/src/features/all_cleaning_overall/straw/main.py index 2c991afd..a9a4574c 100644 --- a/src/features/all_cleaning_overall/straw/main.py +++ b/src/features/all_cleaning_overall/straw/main.py @@ -6,12 +6,17 @@ def straw_cleaning(sensor_data_files, provider): features = pd.read_csv(sensor_data_files["sensor_data"][0]) + # TODO: reorder the cleaning steps so it makes sense for the analysis + # TODO: add conditions that differentiates cleaning steps for standardized and nonstandardized features, for this + # the snakemake rules will also have to come with additional parameter (in rules/features.smk) + # Impute selected features event impute_phone_features = provider["IMPUTE_PHONE_SELECTED_EVENT_FEATURES"] if impute_phone_features["COMPUTE"]: if not 'phone_data_yield_rapids_ratiovalidyieldedminutes' in features.columns: raise KeyError("RAPIDS provider needs to impute the selected event features based on phone_data_yield_rapids_ratiovalidyieldedminutes column, please set config[PHONE_DATA_YIELD][PROVIDERS][RAPIDS][COMPUTE] to True and include 'ratiovalidyieldedminutes' in [FEATURES].") + # TODO: if the type of the imputation will vary for different groups of features make conditional imputations here phone_cols = [col for col in features if \ col.startswith('phone_applications_foreground_rapids_') or col.startswith('phone_battery_rapids_') or @@ -43,15 +48,16 @@ def straw_cleaning(sensor_data_files, provider): # Drop highly correlated features - To-Do še en thershold var, ki je v config + kako se tretirajo NaNs? 
drop_corr_features = provider["DROP_HIGHLY_CORRELATED_FEATURES"] if drop_corr_features["COMPUTE"]: + numerical_cols = features.select_dtypes(include=np.number).columns.tolist() - cor_matrix = features[numerical_cols].corr(method='spearman').abs() + # Remove columns where NaN count threshold is passed + valid_features = features[numerical_cols].loc[:, features[numerical_cols].isna().sum() < drop_corr_features['MIN_OVERLAP_FOR_CORR_THRESHOLD'] * features[numerical_cols].shape[0]] + cor_matrix = valid_features.corr(method='spearman').abs() upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool)) - to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > drop_corr_features["CORR_THRESHOLD"])] - # Tukaj je še neka validacija s thresholdom, ampak ne razumem R kode "valid_pairs" features.drop(to_drop, axis=1, inplace=True) # Remove rows if threshold of NaN values is passed @@ -61,12 +67,15 @@ def straw_cleaning(sensor_data_files, provider): return features def impute(df, method='zero'): - df.loc[:, df.isna().all()] = df.loc[:, df.isna().all()].fillna(0) # if column contains only NaN values impute it with 0 + + def k_nearest(df): # TODO: if needed implement k-nearest imputation / interpolation + pass + return { # rest of the columns should be imputed with the selected method 'zero': df.fillna(0), 'mean': df.fillna(df.mean()), 'median': df.fillna(df.median()), - 'k-nearest': None # To-Do + 'k-nearest': k_nearest(df) }[method]