diff --git a/config.yaml b/config.yaml
index 437f5723..013af9fc 100644
--- a/config.yaml
+++ b/config.yaml
@@ -1,5 +1,5 @@
 # Valid database table names
-SENSORS: [applications_crashes, applications_foreground, applications_notifications, battery, bluetooth, calls, fitbit_data, locations, messages, plugin_ambient_noise, plugin_device_usage, plugin_google_activity_recognition, screen]
+SENSORS: [applications_crashes, applications_foreground, applications_notifications, battery, bluetooth, calls, locations, messages, plugin_ambient_noise, plugin_device_usage, plugin_google_activity_recognition, screen]
 FITBIT_TABLE: [fitbit_data]
 FITBIT_SENSORS: [heartrate, steps, sleep]
 
@@ -71,7 +71,8 @@ BLUETOOTH:
 
 GOOGLE_ACTIVITY_RECOGNITION:
   DAY_SEGMENTS: *day_segments
-  METRICS: ['count','most_common_activity','number_unique_activities','activity_change_count']
+  METRICS: ['count','mostcommonactivity','countuniqueactivities','activitychangecount','sumstationary','summobile','sumvehicle']
+
 
 BATTERY:
   DAY_SEGMENTS: *day_segments
diff --git a/src/features/google_activity_recognition.py b/src/features/google_activity_recognition.py
index bdfac29e..52a95e47 100644
--- a/src/features/google_activity_recognition.py
+++ b/src/features/google_activity_recognition.py
@@ -4,18 +4,17 @@ import scipy.stats as stats
 from features_utils import splitOvernightEpisodes, splitMultiSegmentEpisodes
 
 day_segment = snakemake.params["segment"]
+metrics = snakemake.params["metrics"]
 
 #Read csv into a pandas dataframe
 data = pd.read_csv(snakemake.input['gar_events'],parse_dates=['local_date_time'])
 ar_deltas = pd.read_csv(snakemake.input['gar_deltas'],parse_dates=["local_start_date_time", "local_end_date_time", "local_start_date", "local_end_date"])
-columns = ['count','most_common_activity','count_unique_activities','activity_change_count','sumstationary','summobile','sumvehicle']
-columns = list("ar_" + str(day_segment) + "_" + column for column in columns)
-
+columns = list("ar_" + str(day_segment) + "_" + column for column in metrics)
 
 if data.empty:
     finalDataset = pd.DataFrame(columns = columns)
 else:
-    
+    finalDataset = pd.DataFrame()
     ar_deltas = splitOvernightEpisodes(ar_deltas, [],['activity'])
     
     if day_segment != "daily":
@@ -31,31 +30,34 @@ else:
 
     if resampledData.empty:
         finalDataset = pd.DataFrame(columns = columns)
     else:
-        count = resampledData['activity_type'].resample('D').count()
+        #Finding the count of samples of the day
+        if("count" in metrics):
+            finalDataset["ar_" + str(day_segment) + "_count"] = resampledData['activity_type'].resample('D').count()
 
         #Finding most common activity of the day
-        mostCommonActivity = resampledData['activity_type'].resample('D').apply(lambda x:stats.mode(x)[0])
+        if("mostcommonactivity" in metrics):
+            finalDataset["ar_" + str(day_segment) + "_mostcommonactivity"] = resampledData['activity_type'].resample('D').apply(lambda x:stats.mode(x)[0])
 
         #finding different number of activities during a day
-        uniqueActivities = resampledData['activity_type'].resample('D').nunique()
+        if("countuniqueactivities" in metrics):
+            finalDataset["ar_" + str(day_segment) + "_countuniqueactivities"] = resampledData['activity_type'].resample('D').nunique()
 
         #finding Number of times activity changed
-        resampledData['activity_type_shift'] = resampledData['activity_type'].shift().fillna(resampledData['activity_type'].head(1),inplace=True)
-        resampledData['different_activity'] = np.where(resampledData['activity_type']!=resampledData['activity_type_shift'],1,0)
-        countChanges = resampledData['different_activity'].resample('D').sum()
-        finalDataset = pd.concat([count, mostCommonActivity, uniqueActivities, countChanges],axis=1)
+        if("activitychangecount" in metrics):
+            resampledData['activity_type_shift'] = resampledData['activity_type'].shift().fillna(resampledData['activity_type'].head(1))
+            resampledData['different_activity'] = np.where(resampledData['activity_type']!=resampledData['activity_type_shift'],1,0)
+            finalDataset["ar_" + str(day_segment) + "_activitychangecount"] = resampledData['different_activity'].resample('D').sum()
+
 
         deltas_metrics = {'sumstationary':['still','tilting'], 'summobile':['on_foot','running','on_bicycle'], 'sumvehicle':['in_vehicle']}
-        
+
         for column, activity_labels in deltas_metrics.items():
-            metric = (ar_deltas[ar_deltas['activity'].isin(pd.Series(activity_labels))]
-                    .groupby(['local_start_date'])['time_diff']
-                    .agg({"ar_" + str(day_segment) + "_" + str(column) :'sum'}))
-            finalDataset = finalDataset.merge(metric,how='outer',left_index=True,right_index=True)
+            if column in metrics:
+                finalDataset["ar_" + str(day_segment) + "_"+str(column)] = (ar_deltas[ar_deltas['activity'].isin(pd.Series(activity_labels))]
+                    .groupby(['local_start_date'])['time_diff']
+                    .agg({"ar_" + str(day_segment) + "_" + str(column) :'sum'}))
 
 
-finalDataset.fillna(0,inplace=True)
 finalDataset.index.names = ['local_date']
-finalDataset.columns=columns
 finalDataset.to_csv(snakemake.output[0])
\ No newline at end of file