# rapids/src/features/cr_features_helper_methods.py
import pandas as pd
import math as m
import sys
def extract_second_order_features(intraday_features, so_features_names):
    """Aggregate per-window (first-order) features into second-order features
    per local_segment.

    Parameters
    ----------
    intraday_features : pd.DataFrame
        First-order features; expected to contain a "local_segment" column and
        a helper "level_1" window-index column (presumably the first two
        columns — the nlargest/nsmallest branches rely on that ordering).
    so_features_names : iterable of str
        Which second-order aggregations to compute ("mean", "median", "sd",
        "nlargest_mean", "nsmallest_mean", "count_windows",
        "eda_num_peaks_non_zero", "hrv_num_windows_non_zero").

    Returns
    -------
    pd.DataFrame
        One row per local_segment with the requested "_SO_*" columns, or an
        empty frame with only a "local_segment" column when the input is empty.
    """
    if intraday_features.empty:
        return pd.DataFrame(columns=["local_segment"])

    so_features = pd.DataFrame()
    # Hoist the shared drop+groupby: the original recomputed this for every
    # aggregation (and once per column in the nlargest/nsmallest loops).
    grouped = intraday_features.drop("level_1", axis=1).groupby("local_segment")

    if "mean" in so_features_names:
        so_features = pd.concat([so_features, grouped.mean().add_suffix("_SO_mean")], axis=1)
    if "median" in so_features_names:
        so_features = pd.concat([so_features, grouped.median().add_suffix("_SO_median")], axis=1)
    if "sd" in so_features_names:
        so_features = pd.concat([so_features, grouped.std().add_suffix("_SO_sd")], axis=1)
    if "nlargest_mean" in so_features_names:  # mean of the 5 largest windows per segment
        for column in intraday_features.columns[2:]:
            so_features[column + "_SO_nlargest_mean"] = grouped[column].apply(lambda x: x.nlargest(5).mean())
    if "nsmallest_mean" in so_features_names:  # mean of the 5 smallest windows per segment
        for column in intraday_features.columns[2:]:
            so_features[column + "_SO_nsmallest_mean"] = grouped[column].apply(lambda x: x.nsmallest(5).mean())
    if "count_windows" in so_features_names:
        # Count via "level_1", which is present once per window.
        so_features["SO_windowsCount"] = intraday_features.groupby(["local_segment"]).count()["level_1"]
    # numPeaksNonZero specialized for EDA sensor
    if "eda_num_peaks_non_zero" in so_features_names and "numPeaks" in intraday_features.columns:
        so_features["SO_numPeaksNonZero"] = intraday_features.groupby("local_segment")["numPeaks"].apply(lambda x: (x != 0).sum())
    # numWindowsNonZero specialized for BVP and IBI sensors
    if "hrv_num_windows_non_zero" in so_features_names and "meanHr" in intraday_features.columns:
        so_features["SO_numWindowsNonZero"] = intraday_features.groupby("local_segment")["meanHr"].apply(lambda x: (x != 0).sum())

    # Move local_segment out of the index so callers get a flat frame.
    so_features.reset_index(inplace=True)
    return so_features
def get_sample_rate(data):  # TODO: read the sample rate from the file's metadata instead of inferring it
    """Estimate the sample rate (in Hz) from millisecond timestamps.

    Computes the mean difference between consecutive values of the
    "timestamp" column (assumed milliseconds — TODO confirm against callers)
    and returns ceil(1000 / mean_diff).

    Parameters
    ----------
    data : pd.DataFrame
        Must contain a "timestamp" column with at least two rows.

    Raises
    ------
    Exception
        If the "timestamp" column is missing, or the mean timestamp
        difference is NaN (fewer than two rows) or non-positive.
    """
    try:
        # Keep the try body minimal: only the column access can raise here.
        timestamps_diff = data["timestamp"].diff().dropna().mean()
    except KeyError as err:
        # Chain the cause instead of swallowing it with a bare except.
        raise Exception("Error occured while trying to get the mean sample rate from the data.") from err
    # `not (diff > 0)` also catches NaN (empty or single-row input), which the
    # original let escape as a ValueError from math.ceil, and a zero diff,
    # which raised ZeroDivisionError.
    if not timestamps_diff > 0:
        raise Exception("Error occured while trying to get the mean sample rate from the data.")
    return m.ceil(1000 / timestamps_diff)