Splitting Energy Feature in Conversation to Voice and Noise.
parent a480917b52
commit 6a5470e338
@@ -199,8 +199,9 @@ CONVERSATION:
     IOS: plugin_studentlife_audio
     DAY_SEGMENTS: *day_segments
     FEATURES: ["minutessilence", "minutesnoise", "minutesvoice", "minutesunknown","sumconversationduration","avgconversationduration",
-      "sdconversationduration","minconversationduration","maxconversationduration","timefirstconversation","timelastconversation","sumenergy",
-      "avgenergy","sdenergy","minenergy","maxenergy","silencesensedfraction","noisesensedfraction",
+      "sdconversationduration","minconversationduration","maxconversationduration","timefirstconversation","timelastconversation","noisesumenergy",
+      "noiseavgenergy","noisesdenergy","noiseminenergy","noisemaxenergy","voicesumenergy",
+      "voiceavgenergy","voicesdenergy","voiceminenergy","voicemaxenergy","silencesensedfraction","noisesensedfraction",
       "voicesensedfraction","unknownsensedfraction","silenceexpectedfraction","noiseexpectedfraction","voiceexpectedfraction",
       "unknownexpectedfraction","countconversation"]
     RECORDINGMINUTES: 1
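The first hunk (apparently the CONVERSATION section of the pipeline's configuration) swaps the five generic energy features (sumenergy, avgenergy, sdenergy, minenergy, maxenergy) for ten class-specific ones prefixed with noise or voice. Each configured name becomes one output column per day segment; the following minimal sketch of that naming pattern uses "daily" as an assumed example segment, while the pattern itself is taken from the assignments in the source hunks below:

day_segment = "daily"  # assumed example; segments come from DAY_SEGMENTS
stats = ["sumenergy", "avgenergy", "sdenergy", "minenergy", "maxenergy"]
columns = ["conversation_" + day_segment + "_" + prefix + stat
           for prefix in ("noise", "voice") for stat in stats]
# -> ["conversation_daily_noisesumenergy", ..., "conversation_daily_voicemaxenergy"]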
@@ -3,8 +3,9 @@ import pandas as pd
 def base_conversation_features(conversation_data, day_segment, requested_features,recordingMinutes,pausedMinutes,expectedMinutes):
     # name of the features this function can compute
     base_features_names = ["minutessilence", "minutesnoise", "minutesvoice", "minutesunknown","sumconversationduration","avgconversationduration",
-        "sdconversationduration","minconversationduration","maxconversationduration","timefirstconversation","timelastconversation","sumenergy",
-        "avgenergy","sdenergy","minenergy","maxenergy","silencesensedfraction","noisesensedfraction",
+        "sdconversationduration","minconversationduration","maxconversationduration","timefirstconversation","timelastconversation","noisesumenergy",
+        "noiseavgenergy","noisesdenergy","noiseminenergy","noisemaxenergy","voicesumenergy",
+        "voiceavgenergy","voicesdenergy","voiceminenergy","voicemaxenergy","silencesensedfraction","noisesensedfraction",
         "voicesensedfraction","unknownsensedfraction","silenceexpectedfraction","noiseexpectedfraction","voiceexpectedfraction",
         "unknownexpectedfraction","countconversation"]
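The second hunk makes the same substitution in base_features_names, the list of features the Python function advertises. Keeping it in sync with the config matters because the set of features actually computed is presumably derived by intersecting the two; that intersection is not visible in this hunk, so the following is an assumption-level sketch with hypothetical inputs:

requested_features = ["noisesumenergy", "voicemaxenergy", "notafeature"]      # hypothetical config values
base_features_names = ["noisesumenergy", "noiseavgenergy", "voicemaxenergy"]  # abbreviated
features_to_compute = [f for f in requested_features if f in base_features_names]
# features_to_compute == ["noisesumenergy", "voicemaxenergy"]; unknown names are dropped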
@@ -96,21 +97,35 @@ def base_conversation_features(conversation_data, day_segment, requested_features,recordingMinutes,pausedMinutes,expectedMinutes):
     else:
         conversation_features["conversation_" + day_segment + "_timelastconversation"] = 0

-    if "sumenergy" in features_to_compute:
-        conversation_features["conversation_" + day_segment + "_sumenergy"] = conversation_data.groupby(["local_date"])["double_energy"].sum()
+    if "noisesumenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_noisesumenergy"] = conversation_data[conversation_data['inference']==1].groupby(["local_date"])["double_energy"].sum()

-    if "avgenergy" in features_to_compute:
-        conversation_features["conversation_" + day_segment + "_avgenergy"] = conversation_data.groupby(["local_date"])["double_energy"].mean()
+    if "noiseavgenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_noiseavgenergy"] = conversation_data[conversation_data['inference']==1].groupby(["local_date"])["double_energy"].mean()

-    if "sdenergy" in features_to_compute:
-        conversation_features["conversation_" + day_segment + "_sdenergy"] = conversation_data.groupby(["local_date"])["double_energy"].std()
+    if "noisesdenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_noisesdenergy"] = conversation_data[conversation_data['inference']==1].groupby(["local_date"])["double_energy"].std()

-    if "minenergy" in features_to_compute:
-        conversation_features["conversation_" + day_segment + "_minenergy"] = conversation_data.groupby(["local_date"])["double_energy"].min()
+    if "noiseminenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_noiseminenergy"] = conversation_data[conversation_data['inference']==1].groupby(["local_date"])["double_energy"].min()

-    if "maxenergy" in features_to_compute:
-        conversation_features["conversation_" + day_segment + "_maxenergy"] = conversation_data.groupby(["local_date"])["double_energy"].max()
+    if "noisemaxenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_noisemaxenergy"] = conversation_data[conversation_data['inference']==1].groupby(["local_date"])["double_energy"].max()

+    if "voicesumenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_voicesumenergy"] = conversation_data[conversation_data['inference']==2].groupby(["local_date"])["double_energy"].sum()
+
+    if "voiceavgenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_voiceavgenergy"] = conversation_data[conversation_data['inference']==2].groupby(["local_date"])["double_energy"].mean()
+
+    if "voicesdenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_voicesdenergy"] = conversation_data[conversation_data['inference']==2].groupby(["local_date"])["double_energy"].std()
+
+    if "voiceminenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_voiceminenergy"] = conversation_data[conversation_data['inference']==2].groupby(["local_date"])["double_energy"].min()
+
+    if "voicemaxenergy" in features_to_compute:
+        conversation_features["conversation_" + day_segment + "_voicemaxenergy"] = conversation_data[conversation_data['inference']==2].groupby(["local_date"])["double_energy"].max()

     conversation_features = conversation_features.reset_index()
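The third hunk carries the substance of the change: instead of aggregating double_energy over every audio sample, each statistic is now computed over the rows filtered with conversation_data['inference']==1 (noise) or ==2 (voice). In the StudentLife audio classifier's coding, inference values 0/1/2/3 correspond to silence/noise/voice/unknown, which is consistent with the minutesnoise and minutesvoice features above, though that mapping is inferred from the feature names rather than stated in this diff. A self-contained toy run of the new logic:

import pandas as pd

# Toy data, not from the repository; column names follow the hunk above.
conversation_data = pd.DataFrame({
    "local_date":    ["2020-01-01"] * 4 + ["2020-01-02"] * 2,
    "inference":     [1, 1, 2, 2, 1, 2],   # 1 = noise, 2 = voice (assumed coding)
    "double_energy": [0.5, 0.7, 1.1, 0.9, 0.4, 1.3],
})

day_segment = "daily"
conversation_features = pd.DataFrame()
noise = conversation_data[conversation_data["inference"] == 1]
voice = conversation_data[conversation_data["inference"] == 2]
conversation_features["conversation_" + day_segment + "_noisesumenergy"] = noise.groupby(["local_date"])["double_energy"].sum()
conversation_features["conversation_" + day_segment + "_voiceavgenergy"] = voice.groupby(["local_date"])["double_energy"].mean()
print(conversation_features.reset_index())
#    local_date  conversation_daily_noisesumenergy  conversation_daily_voiceavgenergy
# 0  2020-01-01                                1.2                                1.0
# 1  2020-01-02                                0.4                                1.3

One side effect of filtering before grouping: a local_date whose rows are all of one class produces no group for the other class, so the corresponding column comes out NaN for that date after index alignment.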