diff --git a/README.md b/README.md
index 23a00b27..8176c591 100644
--- a/README.md
+++ b/README.md
@@ -67,7 +67,6 @@ Then follow the "cr-features module" section below.
 This RAPIDS extension uses cr-features library accessible [here](https://repo.ijs.si/matjazbostic/calculatingfeatures).
 
 To use cr-features library:
 
-- For now, use the "modifications_for_rapids" branch to get the newest version of cr-features that is functional for RAPIDS-STRAW analysis.
 - Follow the installation instructions in the [README.md](https://repo.ijs.si/matjazbostic/calculatingfeatures/-/blob/master/README.md).
 
diff --git a/config.yaml b/config.yaml
index 1bac3f95..bcd4de4e 100644
--- a/config.yaml
+++ b/config.yaml
@@ -680,11 +680,11 @@ ALL_CLEANING_INDIVIDUAL:
       DATA_YIELD_FEATURE: RATIO_VALID_YIELDED_HOURS # RATIO_VALID_YIELDED_HOURS or RATIO_VALID_YIELDED_MINUTES
       DATA_YIELD_RATIO_THRESHOLD: 0.3 # set to 0 to disable
       DROP_HIGHLY_CORRELATED_FEATURES:
-        COMPUTE: False
+        COMPUTE: True
         MIN_OVERLAP_FOR_CORR_THRESHOLD: 0.5
         CORR_THRESHOLD: 0.95
       SRC_SCRIPT: src/features/all_cleaning_individual/rapids/main.R
-    STRAW:
+    STRAW: # currently the same as the RAPIDS provider, except for how the imputation type is selected; does not consider the MIN_OVERLAP_FOR_CORR_THRESHOLD param and has no special treatment for phone_esm (see the RAPIDS script)
       COMPUTE: True
       IMPUTE_PHONE_SELECTED_EVENT_FEATURES:
         COMPUTE: True
@@ -715,11 +715,11 @@ ALL_CLEANING_OVERALL:
       DATA_YIELD_FEATURE: RATIO_VALID_YIELDED_HOURS # RATIO_VALID_YIELDED_HOURS or RATIO_VALID_YIELDED_MINUTES
       DATA_YIELD_RATIO_THRESHOLD: 0.3 # set to 0 to disable
       DROP_HIGHLY_CORRELATED_FEATURES:
-        COMPUTE: False
+        COMPUTE: True
         MIN_OVERLAP_FOR_CORR_THRESHOLD: 0.5
         CORR_THRESHOLD: 0.95
       SRC_SCRIPT: src/features/all_cleaning_overall/rapids/main.R
-    STRAW:
+    STRAW: # currently the same as the RAPIDS provider, except for how the imputation type is selected; does not consider the MIN_OVERLAP_FOR_CORR_THRESHOLD param
       COMPUTE: True
       IMPUTE_PHONE_SELECTED_EVENT_FEATURES:
         COMPUTE: True
@@ -741,7 +741,7 @@ ALL_CLEANING_OVERALL:
 # Z-score standardization                                                                                              #
 ########################################################################################################################
-STANDARDIZATION:
+STANDARDIZATION: # Standardization for both providers is executed even if only one of the two providers is marked
   COMPUTE: TRUE
   MERGE_ALL: True # Creates the joint standardized file for each participant and all participants. Similar to merge_sensor_features_for_all_participants rule
   PROVIDERS:
     CR:
@@ -759,7 +759,7 @@ STANDARDIZATION:
 
 PARAMS_FOR_ANALYSIS:
   BASELINE:
-    COMPUTE: False
+    COMPUTE: True
     FOLDER: data/external/baseline
     CONTAINER: [results-survey637813_final.csv, # Slovenia
                 results-survey358134_final.csv, # Belgium 1
@@ -770,5 +770,5 @@ PARAMS_FOR_ANALYSIS:
     CATEGORICAL_FEATURES: [gender]
 
   TARGET:
-    COMPUTE: False
+    COMPUTE: True
     LABEL: PANAS_negative_affect_mean
diff --git a/credentials.yaml b/credentials.yaml
deleted file mode 100644
index 3a1c51e0..00000000
--- a/credentials.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-PSQL_STRAW:
-  database: staw
-  user: staw_db
-  password: kizi-x2yf-mate
-  host: 212.235.208.113
-  port: 5432
diff --git a/src/features/all_cleaning_individual/straw/main.py b/src/features/all_cleaning_individual/straw/main.py
index 2c991afd..f82a355d 100644
--- a/src/features/all_cleaning_individual/straw/main.py
+++ b/src/features/all_cleaning_individual/straw/main.py
@@ -6,12 +6,17 @@ def straw_cleaning(sensor_data_files, provider):
 
     features = pd.read_csv(sensor_data_files["sensor_data"][0])
 
+    # TODO: reorder the cleaning steps so it makes sense for the analysis
+    # TODO: add conditions that differentiate cleaning steps for standardized and nonstandardized features; for this,
+    # the snakemake rules will also have to come with an additional parameter (in rules/features.smk)
+
     # Impute selected features event
     impute_phone_features = provider["IMPUTE_PHONE_SELECTED_EVENT_FEATURES"]
     if impute_phone_features["COMPUTE"]:
         if not 'phone_data_yield_rapids_ratiovalidyieldedminutes' in features.columns:
             raise KeyError("RAPIDS provider needs to impute the selected event features based on phone_data_yield_rapids_ratiovalidyieldedminutes column, please set config[PHONE_DATA_YIELD][PROVIDERS][RAPIDS][COMPUTE] to True and include 'ratiovalidyieldedminutes' in [FEATURES].")
 
+        # TODO: if the type of the imputation will vary for different groups of features, make conditional imputations here
         phone_cols = [col for col in features if \
                         col.startswith('phone_applications_foreground_rapids_') or
                         col.startswith('phone_battery_rapids_') or
@@ -22,7 +27,7 @@ def straw_cleaning(sensor_data_files, provider):
                         col.startswith('phone_wifi_')]
 
         mask = features['phone_data_yield_rapids_ratiovalidyieldedminutes'] > impute_phone_features['MIN_DATA_YIELDED_MINUTES_TO_IMPUTE']
-        features.loc[mask, phone_cols] = impute(features[mask][phone_cols], method=impute_phone_features["TYPE"])
+        features.loc[mask, phone_cols] = impute(features[mask][phone_cols], method=impute_phone_features["TYPE"].lower())
 
     # Drop rows with the value of data_yield_column less than data_yield_ratio_threshold
     data_yield_unit = provider["DATA_YIELD_FEATURE"].split("_")[3].lower()
@@ -33,40 +38,50 @@
     features = features[features[data_yield_column] >= provider["DATA_YIELD_RATIO_THRESHOLD"]]
 
-    # Remove cols if threshold of NaN values is passed
-    features = features.loc[:, features.isna().sum() < provider["COLS_NAN_THRESHOLD"] * features.shape[0]]
+    esm_cols = features.loc[:, features.columns.str.startswith('phone_esm')] # For later preservation of esm_cols
+    # Remove cols if threshold of NaN values is passed
+    features = features.loc[:, features.isna().sum() < provider["COLS_NAN_THRESHOLD"] * features.shape[0]]
+
     # Remove cols where variance is 0
     if provider["COLS_VAR_THRESHOLD"]:
         features.drop(features.std()[features.std() == 0].index.values, axis=1, inplace=True)
 
+    # Preserve esm cols if deleted (has to come after drop cols operations)
+    for esm in esm_cols:
+        if esm not in features:
+            features[esm] = esm_cols[esm]
+
     # Drop highly correlated features - To-Do: one more threshold var that is in config + how are NaNs treated?
     drop_corr_features = provider["DROP_HIGHLY_CORRELATED_FEATURES"]
     if drop_corr_features["COMPUTE"]:
+
         numerical_cols = features.select_dtypes(include=np.number).columns.tolist()
 
-        cor_matrix = features[numerical_cols].corr(method='spearman').abs()
+        # Remove columns where NaN count threshold is passed
+        valid_features = features[numerical_cols].loc[:, features[numerical_cols].isna().sum() < drop_corr_features['MIN_OVERLAP_FOR_CORR_THRESHOLD'] * features[numerical_cols].shape[0]]
+        cor_matrix = valid_features.corr(method='spearman').abs()
         upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
-
         to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > drop_corr_features["CORR_THRESHOLD"])]
-        # There is also some validation with a threshold here, but I do not understand the "valid_pairs" R code
         features.drop(to_drop, axis=1, inplace=True)
 
     # Remove rows if threshold of NaN values is passed
-    min_count = math.ceil((1 - provider["ROWS_NAN_THRESHOLD"]) * features.shape[1]) # min not nan values in row
+    min_count = math.ceil((1 - provider["ROWS_NAN_THRESHOLD"]) * features.shape[1]) # minimum number of non-NaN values in a row
     features.dropna(axis=0, thresh=min_count, inplace=True)
 
     return features
 
 def impute(df, method='zero'):
-    df.loc[:, df.isna().all()] = df.loc[:, df.isna().all()].fillna(0) # if column contains only NaN values impute it with 0
+
+    def k_nearest(df): # TODO: if needed, implement k-nearest imputation / interpolation
+        pass
+
     return { # rest of the columns should be imputed with the selected method
         'zero': df.fillna(0),
         'mean': df.fillna(df.mean()),
         'median': df.fillna(df.median()),
-        'k-nearest': None # To-Do
+        'k-nearest': k_nearest(df)
     }[method]
-
diff --git a/src/features/all_cleaning_overall/straw/main.py b/src/features/all_cleaning_overall/straw/main.py
index 2c991afd..a9a4574c 100644
--- a/src/features/all_cleaning_overall/straw/main.py
+++ b/src/features/all_cleaning_overall/straw/main.py
@@ -6,12 +6,17 @@ def straw_cleaning(sensor_data_files, provider):
 
     features = pd.read_csv(sensor_data_files["sensor_data"][0])
 
+    # TODO: reorder the cleaning steps so it makes sense for the analysis
+    # TODO: add conditions that differentiate cleaning steps for standardized and nonstandardized features; for this,
+    # the snakemake rules will also have to come with an additional parameter (in rules/features.smk)
+
     # Impute selected features event
    impute_phone_features = provider["IMPUTE_PHONE_SELECTED_EVENT_FEATURES"]
     if impute_phone_features["COMPUTE"]:
         if not 'phone_data_yield_rapids_ratiovalidyieldedminutes' in features.columns:
             raise KeyError("RAPIDS provider needs to impute the selected event features based on phone_data_yield_rapids_ratiovalidyieldedminutes column, please set config[PHONE_DATA_YIELD][PROVIDERS][RAPIDS][COMPUTE] to True and include 'ratiovalidyieldedminutes' in [FEATURES].")
 
+        # TODO: if the type of the imputation will vary for different groups of features, make conditional imputations here
         phone_cols = [col for col in features if \
                         col.startswith('phone_applications_foreground_rapids_') or
                         col.startswith('phone_battery_rapids_') or
@@ -43,15 +48,16 @@
     # Drop highly correlated features - To-Do: one more threshold var that is in config + how are NaNs treated?
     drop_corr_features = provider["DROP_HIGHLY_CORRELATED_FEATURES"]
     if drop_corr_features["COMPUTE"]:
+
         numerical_cols = features.select_dtypes(include=np.number).columns.tolist()
 
-        cor_matrix = features[numerical_cols].corr(method='spearman').abs()
+        # Remove columns where NaN count threshold is passed
+        valid_features = features[numerical_cols].loc[:, features[numerical_cols].isna().sum() < drop_corr_features['MIN_OVERLAP_FOR_CORR_THRESHOLD'] * features[numerical_cols].shape[0]]
+        cor_matrix = valid_features.corr(method='spearman').abs()
         upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(np.bool))
-
         to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > drop_corr_features["CORR_THRESHOLD"])]
-        # There is also some validation with a threshold here, but I do not understand the "valid_pairs" R code
         features.drop(to_drop, axis=1, inplace=True)
 
     # Remove rows if threshold of NaN values is passed
@@ -61,12 +67,15 @@
     return features
 
 def impute(df, method='zero'):
-    df.loc[:, df.isna().all()] = df.loc[:, df.isna().all()].fillna(0) # if column contains only NaN values impute it with 0
+
+    def k_nearest(df): # TODO: if needed, implement k-nearest imputation / interpolation
+        pass
+
     return { # rest of the columns should be imputed with the selected method
         'zero': df.fillna(0),
         'mean': df.fillna(df.mean()),
         'median': df.fillna(df.median()),
-        'k-nearest': None # To-Do
+        'k-nearest': k_nearest(df)
     }[method]
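The order of the cleaning steps in `all_cleaning_individual/straw/main.py` matters: the `phone_esm` columns are snapshotted before the column filters, re-attached afterwards, and only then are sparse rows dropped via `dropna(thresh=...)`. A minimal, self-contained illustration of those three steps on toy data (the column names and the `COLS_NAN_THRESHOLD`/`ROWS_NAN_THRESHOLD` values below are invented for the example, not taken from `config.yaml`):

```python
# Toy illustration of the esm-preservation and row-threshold steps; column names,
# values, and thresholds are made up for this sketch.
import math

import numpy as np
import pandas as pd

COLS_NAN_THRESHOLD = 0.3   # a column is kept only if its NaN count stays below 30% of rows
ROWS_NAN_THRESHOLD = 0.5   # a row is kept only if it has at least ceil(50% of columns) non-NaN values

features = pd.DataFrame({
    "phone_battery_rapids_avg": [0.5, np.nan, np.nan, np.nan, 0.7],    # 3/5 NaN -> dropped by the column filter
    "phone_screen_rapids_sum":  [1.0, 2.0, np.nan, 4.0, 5.0],          # 1/5 NaN -> kept
    "phone_esm_straw_pa":       [np.nan, np.nan, np.nan, np.nan, 3.0], # sparse, but must survive as the label source
})

# Snapshot the esm columns before any column-dropping step.
esm_cols = features.loc[:, features.columns.str.startswith("phone_esm")]

# Remove cols if threshold of NaN values is passed.
features = features.loc[:, features.isna().sum() < COLS_NAN_THRESHOLD * features.shape[0]]

# Re-attach esm columns that the column filters removed.
for esm in esm_cols:
    if esm not in features:
        features[esm] = esm_cols[esm]

# Remove rows if threshold of NaN values is passed: here ceil((1 - 0.5) * 2) = 1 non-NaN value per row.
min_count = math.ceil((1 - ROWS_NAN_THRESHOLD) * features.shape[1])
features.dropna(axis=0, thresh=min_count, inplace=True)

print(features)   # row 2 is dropped: it is all-NaN across the two remaining columns
```

Because the esm columns are restored only after the column filters, the `min_count` arithmetic runs on the final column count, which is why the re-attachment "has to come after drop cols operations".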
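`DROP_HIGHLY_CORRELATED_FEATURES` in both STRAW cleaning scripts now builds the Spearman correlation matrix only from columns with enough non-NaN overlap, then drops one column of every pair whose absolute correlation exceeds `CORR_THRESHOLD`. A standalone sketch of that step on made-up data (it reuses the 0.5 / 0.95 values from `config.yaml` and substitutes plain `bool` for the deprecated `np.bool` alias that the patch keeps):

```python
# Standalone sketch of the drop-highly-correlated step; the data are invented.
import numpy as np
import pandas as pd

MIN_OVERLAP_FOR_CORR_THRESHOLD = 0.5
CORR_THRESHOLD = 0.95

rng = np.random.default_rng(0)
features = pd.DataFrame({"a": rng.normal(size=100), "c": rng.normal(size=100)})
features["b"] = features["a"] * 2 + rng.normal(scale=0.01, size=100)  # nearly a monotone copy of "a"
d = rng.normal(size=100)
d[:80] = np.nan                                                       # too sparse to enter the correlation matrix
features["d"] = d

numerical_cols = features.select_dtypes(include=np.number).columns.tolist()

# Only columns with enough non-NaN overlap take part in the correlation matrix.
valid_features = features[numerical_cols].loc[
    :, features[numerical_cols].isna().sum() < MIN_OVERLAP_FOR_CORR_THRESHOLD * features.shape[0]
]

cor_matrix = valid_features.corr(method="spearman").abs()
# np.bool is deprecated/removed in recent NumPy; plain bool behaves the same here.
upper_tri = cor_matrix.where(np.triu(np.ones(cor_matrix.shape), k=1).astype(bool))
to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > CORR_THRESHOLD)]

features.drop(to_drop, axis=1, inplace=True)
print(to_drop)                   # ['b']
print(features.columns.tolist()) # ['a', 'c', 'd']
```

Note that column `d` survives even though it is mostly missing: the overlap filter only decides which columns enter the correlation matrix, it does not remove them from `features` (that is left to the earlier `COLS_NAN_THRESHOLD` filter).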
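`impute()` still leaves the `k-nearest` strategy as a `pass`-through, so selecting it returns `None`. If it is ever needed, it could be backed by something like scikit-learn's `KNNImputer`; the sketch below is an assumption, not part of the patch (scikit-learn is not imported anywhere in these scripts, and `n_neighbors=3` is an arbitrary choice):

```python
# Hedged sketch: one possible way to fill in the k_nearest TODO using scikit-learn.
import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer  # assumed extra dependency, not used by the patch

def k_nearest(df, n_neighbors=3):
    # Fill each NaN from the rows closest on the non-missing columns; assumes df has no
    # all-NaN columns (KNNImputer drops those, which would break the column alignment below).
    imputer = KNNImputer(n_neighbors=n_neighbors)
    return pd.DataFrame(imputer.fit_transform(df), columns=df.columns, index=df.index)

df = pd.DataFrame({"x": [1.0, 2.0, np.nan, 4.0], "y": [10.0, 20.0, 30.0, np.nan]})
print(k_nearest(df))
```

Note also that the dictionary in `impute()` evaluates every strategy before `[method]` picks one, so a real `k_nearest` implementation would run even when `'zero'`, `'mean'`, or `'median'` is requested; dispatching with `if`/`elif` or storing callables in the dict would avoid that extra work.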