parent 5e87db9952  commit 51c7739bfc
@@ -100,15 +100,6 @@ for provider in config["PHONE_LIGHT"]["PROVIDERS"].keys():
        files_to_compute.extend(expand("data/processed/features/{pid}/all_sensor_features.csv", pid=config["PIDS"]))
        files_to_compute.append("data/processed/features/all_participants/all_sensor_features.csv")

for provider in config["PHONE_KEYBOARD"]["PROVIDERS"].keys():
    if config["PHONE_KEYBOARD"]["PROVIDERS"][provider]["COMPUTE"]:
        files_to_compute.extend(expand("data/raw/{pid}/phone_keyboard_raw.csv", pid=config["PIDS"]))
        files_to_compute.extend(expand("data/raw/{pid}/phone_keyboard_with_datetime.csv", pid=config["PIDS"]))
        files_to_compute.extend(expand("data/interim/{pid}/phone_keyboard_features/phone_keyboard_{language}_{provider_key}.csv", pid=config["PIDS"], language=config["PHONE_KEYBOARD"]["PROVIDERS"][provider]["SRC_LANGUAGE"].lower(), provider_key=provider.lower()))
        files_to_compute.extend(expand("data/processed/features/{pid}/phone_keyboard.csv", pid=config["PIDS"],))
        files_to_compute.extend(expand("data/processed/features/{pid}/all_sensor_features.csv", pid=config["PIDS"]))
        files_to_compute.append("data/processed/features/all_participants/all_sensor_features.csv")

for provider in config["PHONE_ACCELEROMETER"]["PROVIDERS"].keys():
    if config["PHONE_ACCELEROMETER"]["PROVIDERS"][provider]["COMPUTE"]:
        files_to_compute.extend(expand("data/raw/{pid}/phone_accelerometer_raw.csv", pid=config["PIDS"]))

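For readers unfamiliar with Snakemake, a minimal sketch (not part of the diff) of what the expand() calls above do: each wildcard in the pattern is filled with every supplied value, and the resulting paths are appended to files_to_compute, which the pipeline's top-level rule then requests. The participant ids below are made up; RAPIDS takes them from config["PIDS"].

    from snakemake.io import expand  # assumes Snakemake is installed

    files_to_compute = []
    pids = ["p01", "p02"]  # hypothetical participant ids
    files_to_compute.extend(expand("data/raw/{pid}/phone_keyboard_raw.csv", pid=pids))
    files_to_compute.extend(expand("data/interim/{pid}/phone_keyboard_features/phone_keyboard_{language}_{provider_key}.csv",
                                   pid=pids, language="python", provider_key="rapids"))
    print(files_to_compute)
    # four paths, one per pid for each pattern, e.g.
    # 'data/raw/p01/phone_keyboard_raw.csv' and
    # 'data/interim/p01/phone_keyboard_features/phone_keyboard_python_rapids.csv'
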
@@ -189,15 +189,6 @@ PHONE_DATA_YIELD:
            SRC_LANGUAGE: "r"
            SRC_FOLDER: "rapids" # inside src/features/phone_data_yield

PHONE_KEYBOARD:
    TABLE: keyboard
    PROVIDERS:
        RAPIDS:
            COMPUTE: False
            FEATURES: []
            SRC_FOLDER: "rapids" # inside src/features/phone_keyboard
            SRC_LANGUAGE: "python"

# See https://www.rapids.science/latest/features/phone-light/
PHONE_LIGHT:
    TABLE: light

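As context (not part of the commit), this provider block is what the Snakefile loop above reads: SRC_LANGUAGE picks whether the python or r keyboard rule produces the interim file, and the provider name is lowercased into the path. A minimal sketch, assuming PyYAML and the RAPIDS provider shown above:

    import yaml  # PyYAML, the same loader Snakemake uses for configfile:

    config = yaml.safe_load(open("config.yaml"))
    entry = config["PHONE_KEYBOARD"]["PROVIDERS"]["RAPIDS"]
    language = entry["SRC_LANGUAGE"].lower()  # "python" -> the phone_keyboard_python_features rule
    path = f"data/interim/{{pid}}/phone_keyboard_features/phone_keyboard_{language}_rapids.csv"
    print(path)  # data/interim/{pid}/phone_keyboard_features/phone_keyboard_python_rapids.csv
    # The Snakefile only adds this path to files_to_compute when entry["COMPUTE"] is True.
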
@@ -262,32 +262,6 @@ rule phone_light_r_features:
    script:
        "../src/features/entry.R"

rule phone_keyboard_python_features:
    input:
        sensor_data = "data/raw/{pid}/phone_keyboard_with_datetime.csv",
        time_segments_labels = "data/interim/time_segments/{pid}_time_segments_labels.csv"
    params:
        provider = lambda wildcards: config["PHONE_KEYBOARD"]["PROVIDERS"][wildcards.provider_key.upper()],
        provider_key = "{provider_key}",
        sensor_key = "phone_keyboard"
    output:
        "data/interim/{pid}/phone_keyboard_features/phone_keyboard_python_{provider_key}.csv"
    script:
        "../src/features/entry.py"

rule phone_keyboard_r_features:
    input:
        sensor_data = "data/raw/{pid}/phone_keyboard_with_datetime.csv",
        time_segments_labels = "data/interim/time_segments/{pid}_time_segments_labels.csv"
    params:
        provider = lambda wildcards: config["PHONE_KEYBOARD"]["PROVIDERS"][wildcards.provider_key.upper()],
        provider_key = "{provider_key}",
        sensor_key = "phone_keyboard"
    output:
        "data/interim/{pid}/phone_keyboard_features/phone_keyboard_r_{provider_key}.csv"
    script:
        "../src/features/entry.R"

rule phone_locations_python_features:
    input:
        sensor_data = "data/interim/{pid}/phone_locations_processed_with_datetime.csv",

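A small illustration (not part of the diff) of how the params lambda in these rules resolves a provider: Snakemake fills {provider_key} from the requested output path (e.g. "rapids" in phone_keyboard_python_rapids.csv) and the lambda upper-cases it to index the config. The config dict and wildcards class below are simplified stand-ins.

    config = {"PHONE_KEYBOARD": {"PROVIDERS": {"RAPIDS": {"COMPUTE": True, "FEATURES": []}}}}

    class Wildcards:  # stand-in for Snakemake's wildcards object
        provider_key = "rapids"

    provider = lambda wildcards: config["PHONE_KEYBOARD"]["PROVIDERS"][wildcards.provider_key.upper()]
    print(provider(Wildcards()))  # {'COMPUTE': True, 'FEATURES': []}
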
@@ -1,40 +0,0 @@
import pandas as pd
import numpy as np


def rapids_features(sensor_data_files, time_segment, provider, filter_data_by_segment, *args, **kwargs):

    # I copied this from light; modify it to make it work for keyboard

    light_data = pd.read_csv(sensor_data_files["sensor_data"])
    print(light_data)         # temporary debug output
    raise ValueError("Test")  # temporary guard: everything below is still the unmodified light logic
    requested_features = provider["FEATURES"]
    # names of the features this function can compute
    base_features_names = ["count", "maxlux", "minlux", "avglux", "medianlux", "stdlux"]
    # the subset of requested features this function can compute
    features_to_compute = list(set(requested_features) & set(base_features_names))

    light_features = pd.DataFrame(columns=["local_segment"] + features_to_compute)
    if not light_data.empty:
        light_data = filter_data_by_segment(light_data, time_segment)

        if not light_data.empty:
            light_features = pd.DataFrame()
            if "count" in features_to_compute:
                light_features["count"] = light_data.groupby(["local_segment"]).count()["timestamp"]

            # get light ambient luminance related features
            if "maxlux" in features_to_compute:
                light_features["maxlux"] = light_data.groupby(["local_segment"])["double_light_lux"].max()
            if "minlux" in features_to_compute:
                light_features["minlux"] = light_data.groupby(["local_segment"])["double_light_lux"].min()
            if "avglux" in features_to_compute:
                light_features["avglux"] = light_data.groupby(["local_segment"])["double_light_lux"].mean()
            if "medianlux" in features_to_compute:
                light_features["medianlux"] = light_data.groupby(["local_segment"])["double_light_lux"].median()
            if "stdlux" in features_to_compute:
                light_features["stdlux"] = light_data.groupby(["local_segment"])["double_light_lux"].std()

            light_features = light_features.reset_index()

    return light_features
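The author's comment notes this file was copied from the light provider and still needs to be adapted for keyboard data. A minimal sketch (not part of the commit) of what that adaptation might look like, keeping the same skeleton but computing only a hypothetical "count" of keystroke events per time segment; any column beyond "timestamp" and "local_segment" is an assumption about the raw keyboard table.

    import pandas as pd

    def rapids_features(sensor_data_files, time_segment, provider, filter_data_by_segment, *args, **kwargs):
        keyboard_data = pd.read_csv(sensor_data_files["sensor_data"])
        requested_features = provider["FEATURES"]
        base_features_names = ["count"]  # the only feature this sketch can compute
        features_to_compute = list(set(requested_features) & set(base_features_names))

        keyboard_features = pd.DataFrame(columns=["local_segment"] + features_to_compute)
        if not keyboard_data.empty:
            keyboard_data = filter_data_by_segment(keyboard_data, time_segment)
            if not keyboard_data.empty:
                keyboard_features = pd.DataFrame()
                if "count" in features_to_compute:
                    # number of keystroke rows that fall inside each time segment
                    keyboard_features["count"] = keyboard_data.groupby("local_segment")["timestamp"].count()
                keyboard_features = keyboard_features.reset_index()
        return keyboard_features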