2020-09-01 21:25:35 +02:00
|
|
|
import pandas as pd
|
|
|
|
import numpy as np
|
|
|
|
import itertools
|
|
|
|
from scipy.stats import entropy
|
|
|
|
|
|
|
|
|
2020-12-03 00:41:03 +01:00
|
|
|
def compute_features(filtered_data, apps_type, requested_features, apps_features, time_segment):
    """Append the requested app-usage features for one category/app to apps_features.

    Each feature becomes a column named ``<feature><apps_type>`` indexed by
    ``local_segment``; the augmented DataFrame is returned.
    """
    # There is the rare occasion that filtered_data is empty (found in testing),
    # so the time-of-use features guard against an empty first/last-event frame.
    if "timeoffirstuse" in requested_features:
        first_events = (filtered_data
                        .sort_values(by="timestamp", ascending=True)
                        .drop_duplicates(subset="local_segment", keep="first")
                        .set_index("local_segment"))
        column = "timeoffirstuse" + apps_type
        if first_events.empty:
            apps_features[column] = np.nan
        else:
            # minutes since local midnight of the earliest event per segment
            apps_features[column] = first_events["local_hour"] * 60 + first_events["local_minute"]

    if "timeoflastuse" in requested_features:
        last_events = (filtered_data
                       .sort_values(by="timestamp", ascending=False)
                       .drop_duplicates(subset="local_segment", keep="first")
                       .set_index("local_segment"))
        column = "timeoflastuse" + apps_type
        if last_events.empty:
            apps_features[column] = np.nan
        else:
            # minutes since local midnight of the latest event per segment
            apps_features[column] = last_events["local_hour"] * 60 + last_events["local_minute"]

    if "frequencyentropy" in requested_features:
        counts_per_app = (filtered_data
                          .groupby(["local_segment", "application_name"])
                          .count()
                          .sort_values(by="timestamp", ascending=False)
                          .reset_index())
        column = "frequencyentropy" + apps_type
        # entropy is not meaningful with fewer than two (segment, app) rows
        if len(counts_per_app.index) < 2:
            apps_features[column] = np.nan
        else:
            apps_features[column] = counts_per_app.groupby("local_segment")["timestamp"].agg(entropy)

    if "count" in requested_features:
        apps_features["count" + apps_type] = filtered_data.groupby(["local_segment"]).count()["timestamp"]
        # segments with no events for this category/app count as zero, not NaN
        apps_features.fillna(value={"count" + apps_type: 0}, inplace=True)

    return apps_features
|
|
|
|
|
|
|
|
|
2020-12-03 00:41:03 +01:00
|
|
|
def rapids_features(sensor_data_files, time_segment, provider, filter_data_by_segment, *args, **kwargs):
    """Extract foreground-app-usage features for one time segment.

    Parameters
    ----------
    sensor_data_files : dict
        Must contain key ``"sensor_data"`` with the path to the apps CSV.
    time_segment : str
        Label of the time segment being processed.
    provider : dict
        Provider config with FEATURES, EXCLUDED_CATEGORIES, EXCLUDED_APPS,
        SINGLE_CATEGORIES, MULTIPLE_CATEGORIES (mapping category -> genre list)
        and SINGLE_APPS keys.
    filter_data_by_segment : callable
        Restricts a DataFrame to the rows belonging to ``time_segment``.

    Returns
    -------
    pandas.DataFrame
        One row per local_segment, one column per (feature, category/app) pair;
        an empty placeholder frame with all expected columns when there is no data.
    """
    apps_data = pd.read_csv(sensor_data_files["sensor_data"])

    requested_features = provider["FEATURES"]
    excluded_categories = provider["EXCLUDED_CATEGORIES"]
    excluded_apps = provider["EXCLUDED_APPS"]
    multiple_categories_with_genres = provider["MULTIPLE_CATEGORIES"]
    single_categories = provider["SINGLE_CATEGORIES"]
    single_apps = provider["SINGLE_APPS"]

    # Drop excluded entries from the requested category/app lists.
    # NOTE: the dead re-read of provider["MULTIPLE_CATEGORIES"] into
    # multiple_categories was removed; it was always overwritten here.
    single_categories = list(set(single_categories) - set(excluded_categories))
    multiple_categories = list(multiple_categories_with_genres.keys() - set(excluded_categories))
    single_apps = list(set(single_apps) - set(excluded_apps))

    # exclude categories in the excluded_categories list
    if "system_apps" in excluded_categories:
        apps_data = apps_data[apps_data["is_system_app"] == 0]
    apps_data = apps_data[~apps_data["genre"].isin(excluded_categories)]
    # exclude apps in the excluded_apps list
    apps_data = apps_data[~apps_data["package_name"].isin(excluded_apps)]

    # Placeholder with every expected feature column; returned as-is when no data survives.
    apps_features = pd.DataFrame(columns=["local_segment"] + ["".join(feature) for feature in itertools.product(requested_features, single_categories + multiple_categories + single_apps)])

    if not apps_data.empty:
        # deep copy the apps_data for the top1global computation
        # (top1global must reflect overall usage, before segment filtering)
        apps_data_global = apps_data.copy()

        apps_data = filter_data_by_segment(apps_data, time_segment)

        if not apps_data.empty:
            apps_features = pd.DataFrame()

            # single category ("all" means no genre filtering)
            single_categories.sort()
            for sc in single_categories:
                if sc == "all":
                    apps_features = compute_features(apps_data, "all", requested_features, apps_features, time_segment)
                else:
                    filtered_data = apps_data[apps_data["genre"].isin([sc])]
                    apps_features = compute_features(filtered_data, sc, requested_features, apps_features, time_segment)

            # multiple category: each name maps to a list of genres
            for mc in multiple_categories:
                filtered_data = apps_data[apps_data["genre"].isin(multiple_categories_with_genres[mc])]
                apps_features = compute_features(filtered_data, mc, requested_features, apps_features, time_segment)

            # single apps; "top1global" resolves to the overall most-used app
            for app in single_apps:
                col_name = app
                if app == "top1global":
                    # get the most used app across the whole (unfiltered) dataset
                    apps_with_count = apps_data_global.groupby(["package_name"]).count().sort_values(by="timestamp", ascending=False).reset_index()
                    app = apps_with_count.iloc[0]["package_name"]
                    col_name = "top1global"
                filtered_data = apps_data[apps_data["package_name"].isin([app])]
                apps_features = compute_features(filtered_data, col_name, requested_features, apps_features, time_segment)

            # compute_features indexed by local_segment; expose it as a column again
            apps_features = apps_features.reset_index()

    return apps_features
|