Incorporate DB query for usernames into snakemake workflow.

rapids
junos 2021-12-01 18:14:27 +01:00
parent 7d0355d095
commit f5c17aa7ce
1 changed files with 10 additions and 4 deletions

View File

@@ -1,4 +1,10 @@
import datetime
import os
import sys
nb_dir = os.path.split(os.getcwd())[0]
if nb_dir not in sys.path:
sys.path.append(nb_dir)
import pandas as pd
from features.timezone import get_timezone_data
@@ -16,10 +22,10 @@ participants_inactive_usernames = participants.query_db.get_usernames(
)
participants_overview_si = pd.read_csv(
-    "E:/STRAWbaseline/Participants_overview_Slovenia.csv", sep=";"
+    snakemake.params["baseline_folder"] + "Participants_overview_Slovenia.csv", sep=";"
)
participants_overview_be = pd.read_csv(
-    "E:/STRAWbaseline/Participants_overview_Belgium.csv", sep=";"
+    snakemake.params["baseline_folder"]+ "Participants_overview_Belgium.csv", sep=";"
)
participants_true_si = participants_overview_si[
@@ -47,7 +53,7 @@ participants_usernames_empatica["empatica_id"] = participants_usernames_empatica
].str.replace(",", ";")
participants_usernames_empatica.to_csv(
-    here("rapids/data/external/main_study_participants.csv"),
+    snakemake.output["usernames_file"],
    header=True,
    index=False,
    line_terminator="\n",
@@ -56,7 +62,7 @@ participants_usernames_empatica.to_csv(
timezone_df = get_timezone_data(participants_inactive_usernames)
timezone_df.to_csv(
-    here("rapids/data/external/timezone.csv"),
+    snakemake.output["timezone_file"],
    header=True,
    index=False,
    line_terminator="\n",