rapids/src/features/empatica_accelerometer/cf/main.py

import numpy as np
import pandas as pd
from scipy.stats import entropy
from CalculatingFeatures.helper_functions import convert3DEmpaticaToArray, convertInputInto2d, accelerometerFeatureNames
from CalculatingFeatures.calculate_features import calculateFeatures


def getSampleRate(data):
    # Timestamps are expected in milliseconds, so the sample rate in Hz is
    # 1000 divided by the gap between two consecutive samples.
    try:
        timestamps_diff = data['timestamp'].iloc[1] - data['timestamp'].iloc[0]
    except IndexError:
        raise Exception("Error occurred while trying to get the sample rate from the first two sequential timestamps.")
    return 1000 / timestamps_diff
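
# Hedged usage sketch (not part of the original module): with millisecond
# timestamps, consecutive Empatica E4 accelerometer samples are nominally
# 31.25 ms apart, which recovers the device's 32 Hz sample rate. The frame
# below is synthetic, for illustration only.
#
#   demo = pd.DataFrame({"timestamp": [1600000000000.00, 1600000000031.25]})
#   getSampleRate(demo)  # 1000 / 31.25 == 32.0 Hz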

def extractAccFeaturesFromIntradayData(acc_intraday_data, features, time_segment, filter_data_by_segment):
    acc_intraday_features = pd.DataFrame(columns=["local_segment"] + features)
    if not acc_intraday_data.empty:
        sample_rate = getSampleRate(acc_intraday_data)
        acc_intraday_data = filter_data_by_segment(acc_intraday_data, time_segment)
        if not acc_intraday_data.empty:
            # apply methods from the calculate features module to each time segment
            acc_intraday_features = acc_intraday_data.groupby('local_segment').apply(
                lambda x: calculateFeatures(convertInputInto2d(x['accelerometer'], x.shape[0]),
                                            fs=sample_rate, featureNames=features))
            acc_intraday_features.reset_index(inplace=True)
    return acc_intraday_features
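
# A minimal sketch of the windowing assumed above. convertInputInto2d belongs
# to the external CalculatingFeatures package; the reshape below is an assumed
# equivalent, not its actual implementation. Passing x.shape[0] as the window
# length turns each segment's 1D signal into a single row, so calculateFeatures
# sees one window spanning the whole segment. The lambda also assumes the
# filtered frame exposes the signal under an 'accelerometer' column.
#
#   signal = np.arange(6)             # one segment's samples
#   signal.reshape(1, signal.size)    # [[0, 1, 2, 3, 4, 5]] -- one window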

def cf_features(sensor_data_files, time_segment, provider, filter_data_by_segment, *args, **kwargs):
    acc_intraday_data = pd.read_csv(sensor_data_files["sensor_data"])
    requested_intraday_features = provider["FEATURES"]
    # names of the features this function can compute (accelerometer list from CalculatingFeatures)
    base_intraday_features_names = accelerometerFeatureNames
    # the subset of requested features this function can compute
    intraday_features_to_compute = list(set(requested_intraday_features) & set(base_intraday_features_names))
    # extract features from intraday data
    acc_intraday_features = extractAccFeaturesFromIntradayData(acc_intraday_data, intraday_features_to_compute,
                                                               time_segment, filter_data_by_segment)
    return acc_intraday_features

def dbdp_features(sensor_data_files, time_segment, provider, filter_data_by_segment, *args, **kwargs):
    acc_data = pd.read_csv(sensor_data_files["sensor_data"])
    requested_features = provider["FEATURES"]
    # names of the features this function can compute
    base_features_names = ["maxmagnitude", "minmagnitude", "avgmagnitude", "medianmagnitude", "stdmagnitude"]
    # the subset of requested features this function can compute
    features_to_compute = list(set(requested_features) & set(base_features_names))
    acc_features = pd.DataFrame(columns=["local_segment"] + features_to_compute)
    if not acc_data.empty:
        acc_data = filter_data_by_segment(acc_data, time_segment)
        if not acc_data.empty:
            acc_features = pd.DataFrame()
            # magnitude-related features: magnitude = sqrt(x^2 + y^2 + z^2)
            magnitude = np.sqrt(acc_data["double_values_0"] ** 2 + acc_data["double_values_1"] ** 2 + acc_data["double_values_2"] ** 2)
            acc_data = acc_data.assign(magnitude=magnitude.values)
            magnitude_by_segment = acc_data.groupby(["local_segment"])["magnitude"]
            if "maxmagnitude" in features_to_compute:
                acc_features["maxmagnitude"] = magnitude_by_segment.max()
            if "minmagnitude" in features_to_compute:
                acc_features["minmagnitude"] = magnitude_by_segment.min()
            if "avgmagnitude" in features_to_compute:
                acc_features["avgmagnitude"] = magnitude_by_segment.mean()
            if "medianmagnitude" in features_to_compute:
                acc_features["medianmagnitude"] = magnitude_by_segment.median()
            if "stdmagnitude" in features_to_compute:
                acc_features["stdmagnitude"] = magnitude_by_segment.std()
            acc_features = acc_features.reset_index()
    return acc_features
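

if __name__ == "__main__":
    # Hedged smoke test, not part of the RAPIDS pipeline: exercises
    # dbdp_features with a synthetic two-segment CSV and a pass-through
    # stand-in for RAPIDS' filter_data_by_segment (the real helper derives
    # local_segment from the sensor data's assigned segments).
    import os
    import tempfile

    demo = pd.DataFrame({
        "local_segment": ["daily0"] * 3 + ["daily1"] * 3,
        "double_values_0": [0.1, 0.2, 0.3, 1.0, 1.1, 1.2],
        "double_values_1": [0.0, 0.1, 0.0, 0.5, 0.4, 0.6],
        "double_values_2": [0.9, 1.0, 0.9, 0.2, 0.3, 0.1],
    })
    fd, path = tempfile.mkstemp(suffix=".csv")
    os.close(fd)
    demo.to_csv(path, index=False)

    passthrough = lambda data, time_segment: data
    provider = {"FEATURES": ["maxmagnitude", "avgmagnitude", "stdmagnitude"]}
    print(dbdp_features({"sensor_data": path}, "daily", provider, passthrough))
    os.remove(path)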