diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 37e3eb32..ca60883c 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -31,12 +31,16 @@ jobs:
         sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/'
         sudo apt install r-base
     - name: Cache R packages
-      uses: actions/cache@v1
+      uses: actions/cache@v2
+      id: cacherenv
       with:
         path: ${{ env.RENV_PATHS_ROOT }}
         key: ${{ runner.os }}-renv-${{ hashFiles('**/renv.lock') }}
         restore-keys: |
           ${{ runner.os }}-renv-
+    - name: Install R dependencies
+      if: steps.cacherenv.outputs.cache-hit != 'true'
+      run: sudo apt install libcurl4-openssl-dev
     - name: Restore R packages
       shell: Rscript {0}
       run: |
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 98993d3e..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-services:
-- mysql
-- docker
-sudo: required
-language: python
-jobs:
-  include:
-  - stage: Tests
-    name: Python 3.7 on Xenial Linux
-    os: linux
-    language: python
-    python: 3.7
-    before_install:
-    - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
-    - export PATH=/home/linuxbrew/.linuxbrew/bin:$PATH
-    - source ~/.bashrc
-    - sudo apt-get install linuxbrew-wrapper
-    - brew tap --shallow linuxbrew/xorg
-    - brew install r
-    - R --version
-    - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O
-      miniconda.sh;
-    - bash miniconda.sh -b -p $HOME/miniconda
-    - source "$HOME/miniconda/etc/profile.d/conda.sh"
-    - hash -r
-    - conda config --set always_yes yes --set changeps1 no
-    install:
-    - conda init bash
-    - conda update -q --all --yes conda
-    - conda env create -q -n test-environment python=$TRAVIS_PYTHON_VERSION --file
-      environment.yml
-    - conda activate test-environment
-    - snakemake -j1 renv_install
-    - R -e 'renv::settings$use.cache(FALSE)'
-    - snakemake -j1 renv_restore
-    cache:
-      directories:
-      - "/home/travis/.linuxbrew"
-      - "$HOME/.local/share/renv"
-      - "$TRAVIS_BUILD_DIR/renv/library"
-    script:
-    - bash tests/scripts/run_tests.sh all test
-  - stage: deploy
-    name: Python 3.7 on Xenial Linux Docker
-    os: linux
-    language: python
-    script:
-    - docker build -t rapids .
-    - docker login -u "agamk" -p $DOCKERPWD
-    - docker tag rapids agamk/rapids:travislatest
-    - docker push agamk/rapids:travislatest
-branches:
-  only:
-  - master
-  - time_segment
-
-stages:
-  - name: deploy
-    if: branch = master AND \
-      type = push
-
-notifications:
-  email: false
-  slack:
-    secure: cJIpmIjb3zA5AMDBo9axF1v6fYNIgMm6s6UdMNOlHiT511xHGsaLUFej3lACwQLig4Gr94ySI61YdrP+RX1lFcYxusH+kUU/c8LX0PmSKNeKnycM3w/pCM+yTp/6oQG6ZrJD7pNm6zhB0xPL61uSmYhcr+JJ1sh4iLiON+J8/C+IfnAHm1ORkxJ0IxASkiP/LvaiAQDw8lNyYIZNWjSDNZbx68o1VNakyk6Vik3x8omiE3w33rzI2/JAx//QTxOq2J0dtV1AqYYSOWS4iXblV09NLBqgGrhAhrQ6+TbPHSPIyL/4EdhvS+YXO+SBWS7ODD7j/MuL6XiA4SujW72od2rgXNmOjFnlQvIrULO5bzv39BKKDkldvz9+XCyXLcjoLIwA/rmUnwMndNoC7NoD/CkQEevUxswXXB9811BmIFx/7GOHouVxwB2gaMAzkCroZJVwgbrc6ESSOVE5SMcb3wPMbpd8cXOgVZXJcmk5wK206zxXPigCvFfknqOnwDqRgyIWSFoTd/2wHppA7ND3R5U42nQTbEQ7MiONsOo61GlJTTxJELz32sLKl388AuAgOY7+0sqPibxMaHJkF1V4nYVTH0/H5bO/edK4VHMloJ6s0kuyko7LT5EMQf3pBJij5TnYmD2E60t+bSBAxHuH7WA5dvL+igjGEwROnxDc9pc=
-    on_success: always
-    template:
-    - Repo `%{repository_slug}` *%{result}* build (<%{build_url}|#%{build_number}>)
-      for commit (<%{compare_url}|%{commit}>) on branch `%{branch}`.
-    - 'Execution time: *%{duration}*'
-    - 'Message: %{message}'
-env:
-  global:
-    secure: FD2aOa8L3lWf1xClZ24uS59SOBjMH16sdSLPGkb6bQLwrKAQw6BVna5wOw3iRscZtx2iqEQw3LdLmNb6ftI4fgHhf7qoAZlKVlc2Q8wU4L623Ad8S//2Ny1AxXRyzwmRw4emmIUqRXiGaeZYkzcptf38+2d9PjHazVsL3A6T2vFK+VAQmZBq3Iblx0i3g25qevQxFUACH1FIpZsmn08cesblZp0MiQ7GOq4YhBAqmbraT4/w7yFe1rwm/yPSWeBQKu8tZeZnEW6/FPYidxxuBgl/BxTdVuIKHcVzL95Mu4q6Y7uVaYeGYgyxai8eyntpY2dPu0wN1ng4JxulwqKBdxkWFPdbBJSGnYQq5EmrqULjro7wk9GVLSN9Lx0QjcmZRbNbDH0rpgxcXS9mtvzmgFbmatdsMa3VrObqKL2yYMsPZ6e5N4ve3gTU5+sm6oz/zYNWK2CDN2f08BJuaoKv9hETTfvWaZitKT7lFZ2LpsDdHSPUtRiAviDcLZcCZsTQjyCi6JeKSF2aMQ0+4rCsZgFkqpmjEVJB5N6DMkdZaUn+4HrbGsivAHWQsDcvPTD4n2CUcboV407NFsckr3PlDy0+fNNHr2h45VjO7DxAwDIJAdiwlhbj9l9gn8i3aZOtMCT6p4xIC2CgqOcY4yOTHmyOswJwnkz3uoSOq3eNLR4=
diff --git a/sn_profile_rapids/Snakefile b/sn_profile_rapids/Snakefile
deleted file mode 100644
index 18a469a5..00000000
--- a/sn_profile_rapids/Snakefile
+++ /dev/null
@@ -1,231 +0,0 @@
-import itertools
-import hashlib
-import collections
-
-configfile: "config.yaml"
-include: "../rules/common.smk"
-include: "../rules/renv.snakefile"
-include: "../rules/preprocessing.snakefile"
-include: "../rules/features.snakefile"
-include: "../rules/models.snakefile"
-include: "../rules/reports.snakefile"
-include: "../rules/mystudy.snakefile" # You can add snakfiles with rules tailored to your project
-
-
-
-if len(config["PIDS"]) == 0:
-    raise ValueError("Add participants IDs to PIDS in config.yaml. Remember to create their participant files in data/external")
-
-files_to_compute = []
-
-if config["PHONE_VALID_SENSED_BINS"]["COMPUTE"]:
-    if len(config["PHONE_VALID_SENSED_BINS"]["TABLES"]) == 0:
-        raise ValueError("If you want to compute PHONE_VALID_SENSED_BINS, you need to add at least one table to [PHONE_VALID_SENSED_BINS][TABLES] in config.yaml")
-    files_to_compute.extend(expand("data/interim/{pid}/phone_sensed_bins.csv", pid=config["PIDS"]))
-
-if config["PHONE_VALID_SENSED_DAYS"]["COMPUTE"]:
-    if len(config["PHONE_VALID_SENSED_BINS"]["TABLES"]) == 0:
-        raise ValueError("If you want to compute PHONE_VALID_SENSED_DAYS, you need to add at least one table to [PHONE_VALID_SENSED_BINS][TABLES] in config.yaml")
-    files_to_compute.extend(expand("data/interim/{pid}/phone_sensed_bins.csv", pid=config["PIDS"]))
-    files_to_compute.extend(expand("data/interim/{pid}/phone_valid_sensed_days.csv", pid=config["PIDS"]))
-
-if config["MESSAGES"]["COMPUTE"]:
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["MESSAGES"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["MESSAGES"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/processed/{pid}/messages_{messages_type}_{time_segment}.csv", pid=config["PIDS"], messages_type = config["MESSAGES"]["TYPES"], time_segment = config["MESSAGES"]["TIME_SEGMENTS"]))
-
-if config["CALLS"]["COMPUTE"]:
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["CALLS"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["CALLS"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime_unified.csv", pid=config["PIDS"], sensor=config["CALLS"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/processed/{pid}/calls_{call_type}_{time_segment}.csv", pid=config["PIDS"], call_type=config["CALLS"]["TYPES"], time_segment = config["CALLS"]["TIME_SEGMENTS"]))
config["CALLS"]["TIME_SEGMENTS"])) - -if config["BARNETT_LOCATION"]["COMPUTE"]: - # TODO add files_to_compute.extend(optional_location_input(None)) - if config["BARNETT_LOCATION"]["LOCATIONS_TO_USE"] == "RESAMPLE_FUSED": - if config["BARNETT_LOCATION"]["DB_TABLE"] in config["PHONE_VALID_SENSED_BINS"]["TABLES"]: - files_to_compute.extend(expand("data/interim/{pid}/phone_sensed_bins.csv", pid=config["PIDS"])) - else: - raise ValueError("Error: Add your locations table (and as many sensor tables as you have) to [PHONE_VALID_SENSED_BINS][TABLES] in config.yaml. This is necessary to compute phone_sensed_bins (bins of time when the smartphone was sensing data) which is used to resample fused location data (RESAMPLED_FUSED)") - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["BARNETT_LOCATION"]["DB_TABLE"])) - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["BARNETT_LOCATION"]["DB_TABLE"])) - files_to_compute.extend(expand("data/processed/{pid}/location_barnett_{time_segment}.csv", pid=config["PIDS"], time_segment = config["BARNETT_LOCATION"]["TIME_SEGMENTS"])) - -if config["BLUETOOTH"]["COMPUTE"]: - files_to_compute.extend(expand("data/interim/{sensor}_time_segments.csv", sensor=config["BLUETOOTH"]["DB_TABLE"])) - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["BLUETOOTH"]["DB_TABLE"])) - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["BLUETOOTH"]["DB_TABLE"])) - files_to_compute.extend(expand("data/processed/{pid}/bluetooth_features.csv", pid=config["PIDS"] )) - -if config["ACTIVITY_RECOGNITION"]["COMPUTE"]: - # TODO add files_to_compute.extend(optional_ar_input(None)), the Android or iOS table gets processed depending on each participant - files_to_compute.extend(expand("data/processed/{pid}/activity_recognition_{time_segment}.csv",pid=config["PIDS"], time_segment = config["ACTIVITY_RECOGNITION"]["TIME_SEGMENTS"])) - -if config["BATTERY"]["COMPUTE"]: - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["BATTERY"]["DB_TABLE"])) - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["BATTERY"]["DB_TABLE"])) - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime_unified.csv", pid=config["PIDS"], sensor=config["BATTERY"]["DB_TABLE"])) - files_to_compute.extend(expand("data/processed/{pid}/battery_deltas.csv", pid=config["PIDS"])) - files_to_compute.extend(expand("data/processed/{pid}/battery_{time_segment}.csv", pid = config["PIDS"], time_segment = config["BATTERY"]["TIME_SEGMENTS"])) - -if config["SCREEN"]["COMPUTE"]: - if config["SCREEN"]["DB_TABLE"] in config["PHONE_VALID_SENSED_BINS"]["TABLES"]: - files_to_compute.extend(expand("data/interim/{pid}/phone_sensed_bins.csv", pid=config["PIDS"])) - else: - raise ValueError("Error: Add your screen table (and as many sensor tables as you have) to [PHONE_VALID_SENSED_BINS][TABLES] in config.yaml. 
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["SCREEN"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["SCREEN"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime_unified.csv", pid=config["PIDS"], sensor=config["SCREEN"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/processed/{pid}/screen_deltas.csv", pid=config["PIDS"]))
-    files_to_compute.extend(expand("data/processed/{pid}/screen_{time_segment}.csv", pid = config["PIDS"], time_segment = config["SCREEN"]["TIME_SEGMENTS"]))
-
-if config["LIGHT"]["COMPUTE"]:
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["LIGHT"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["LIGHT"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/processed/{pid}/light_{time_segment}.csv", pid = config["PIDS"], time_segment = config["LIGHT"]["TIME_SEGMENTS"]))
-
-if config["ACCELEROMETER"]["COMPUTE"]:
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["ACCELEROMETER"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["ACCELEROMETER"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/processed/{pid}/accelerometer_{time_segment}.csv", pid = config["PIDS"], time_segment = config["ACCELEROMETER"]["TIME_SEGMENTS"]))
-
-if config["APPLICATIONS_FOREGROUND"]["COMPUTE"]:
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["APPLICATIONS_FOREGROUND"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["APPLICATIONS_FOREGROUND"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/interim/{pid}/{sensor}_with_datetime_with_genre.csv", pid=config["PIDS"], sensor=config["APPLICATIONS_FOREGROUND"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/processed/{pid}/applications_foreground_{time_segment}.csv", pid = config["PIDS"], time_segment = config["APPLICATIONS_FOREGROUND"]["TIME_SEGMENTS"]))
-
-if config["WIFI"]["COMPUTE"]:
-    files_to_compute.extend(expand("data/interim/{sensor}_time_segments.csv", sensor=config["WIFI"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["WIFI"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["WIFI"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/processed/{pid}/wifi_features.csv", pid = config["PIDS"], time_segment = config["WIFI"]["TIME_SEGMENTS"]))
-
-if config["HEARTRATE"]["COMPUTE"]:
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["HEARTRATE"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/fitbit_heartrate_{fitbit_data_type}_with_datetime.csv", pid=config["PIDS"], fitbit_data_type=["summary", "intraday"]))
-    files_to_compute.extend(expand("data/processed/{pid}/fitbit_heartrate_{time_segment}.csv", pid = config["PIDS"], time_segment = config["HEARTRATE"]["TIME_SEGMENTS"]))
-
-if config["STEP"]["COMPUTE"]:
-    if config["STEP"]["EXCLUDE_SLEEP"]["EXCLUDE"] == True and config["STEP"]["EXCLUDE_SLEEP"]["TYPE"] == "FITBIT_BASED":
config["STEP"]["EXCLUDE_SLEEP"]["TYPE"] == "FITBIT_BASED": - files_to_compute.extend(expand("data/raw/{pid}/fitbit_sleep_{fitbit_data_type}_with_datetime.csv", pid=config["PIDS"], fitbit_data_type=["summary"])) - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["STEP"]["DB_TABLE"])) - files_to_compute.extend(expand("data/raw/{pid}/fitbit_step_{fitbit_data_type}_with_datetime.csv", pid=config["PIDS"], fitbit_data_type=["intraday"])) - files_to_compute.extend(expand("data/processed/{pid}/fitbit_step_{time_segment}.csv", pid = config["PIDS"], time_segment = config["STEP"]["TIME_SEGMENTS"])) - -if config["SLEEP"]["COMPUTE"]: - files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["SLEEP"]["DB_TABLE"])) - files_to_compute.extend(expand("data/raw/{pid}/fitbit_sleep_{fitbit_data_type}_with_datetime.csv", pid=config["PIDS"], fitbit_data_type=["intraday", "summary"])) - files_to_compute.extend(expand("data/processed/{pid}/fitbit_sleep_{time_segment}.csv", pid = config["PIDS"], time_segment = config["SLEEP"]["TIME_SEGMENTS"])) - -if config["CONVERSATION"]["COMPUTE"]: - # TODO add files_to_compute.extend(optional_conversation_input(None)), the Android or iOS table gets processed depending on each participant - files_to_compute.extend(expand("data/processed/{pid}/conversation_{time_segment}.csv",pid=config["PIDS"], time_segment = config["CONVERSATION"]["TIME_SEGMENTS"])) - -if config["DORYAB_LOCATION"]["COMPUTE"]: - if config["DORYAB_LOCATION"]["LOCATIONS_TO_USE"] == "RESAMPLE_FUSED": - if config["DORYAB_LOCATION"]["DB_TABLE"] in config["PHONE_VALID_SENSED_BINS"]["TABLES"]: - files_to_compute.extend(expand("data/interim/{pid}/phone_sensed_bins.csv", pid=config["PIDS"])) - else: - raise ValueError("Error: Add your locations table (and as many sensor tables as you have) to [PHONE_VALID_SENSED_BINS][TABLES] in config.yaml. 
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_raw.csv", pid=config["PIDS"], sensor=config["DORYAB_LOCATION"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/raw/{pid}/{sensor}_with_datetime.csv", pid=config["PIDS"], sensor=config["DORYAB_LOCATION"]["DB_TABLE"]))
-    files_to_compute.extend(expand("data/processed/{pid}/location_doryab_{segment}.csv", pid=config["PIDS"], segment = config["DORYAB_LOCATION"]["TIME_SEGMENTS"]))
-
-if config["PARAMS_FOR_ANALYSIS"]["COMPUTE"]:
-    rows_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["ROWS_NAN_THRESHOLD"]
-    cols_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_NAN_THRESHOLD"]
-    models, scalers, rows_nan_thresholds, cols_nan_thresholds = [], [], [], []
-    for model_name in config["PARAMS_FOR_ANALYSIS"]["MODEL_NAMES"]:
-        models = models + [model_name] * len(config["PARAMS_FOR_ANALYSIS"]["MODEL_SCALER"][model_name]) * len(rows_nan_threshold)
-        scalers = scalers + config["PARAMS_FOR_ANALYSIS"]["MODEL_SCALER"][model_name] * len(rows_nan_threshold)
-        rows_nan_thresholds = rows_nan_thresholds + list(itertools.chain.from_iterable([threshold] * len(config["PARAMS_FOR_ANALYSIS"]["MODEL_SCALER"][model_name]) for threshold in rows_nan_threshold))
-        cols_nan_thresholds = cols_nan_thresholds + list(itertools.chain.from_iterable([threshold] * len(config["PARAMS_FOR_ANALYSIS"]["MODEL_SCALER"][model_name]) for threshold in cols_nan_threshold))
-    results = config["PARAMS_FOR_ANALYSIS"]["RESULT_COMPONENTS"] + ["merged_population_model_results"]
-
-    files_to_compute.extend(expand("data/processed/{pid}/data_for_individual_model/{source}_{time_segment}_original.csv",
-                            pid = config["PIDS"],
-                            source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
-                            time_segment = config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"]))
-    files_to_compute.extend(expand("data/processed/data_for_population_model/{source}_{time_segment}_original.csv",
-                            source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
-                            time_segment = config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"]))
-    files_to_compute.extend(expand(
-        expand("data/processed/{pid}/data_for_individual_model/{{rows_nan_threshold}}|{{cols_nan_threshold}}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{source}_{time_segment}_clean.csv",
-            pid = config["PIDS"],
-            days_before_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_BEFORE_THRESHOLD"],
-            days_after_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_AFTER_THRESHOLD"],
-            cols_var_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_VAR_THRESHOLD"],
-            source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
-            time_segment = config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"]),
-        zip,
-        rows_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["ROWS_NAN_THRESHOLD"],
-        cols_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_NAN_THRESHOLD"]))
-    files_to_compute.extend(expand(
-        expand("data/processed/data_for_population_model/{{rows_nan_threshold}}|{{cols_nan_threshold}}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{source}_{time_segment}_clean.csv",
-            days_before_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_BEFORE_THRESHOLD"],
-            days_after_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_AFTER_THRESHOLD"],
-            cols_var_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_VAR_THRESHOLD"],
-            source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"],
-            time_segment = config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"]),
config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"]), - zip, - rows_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["ROWS_NAN_THRESHOLD"], - cols_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_NAN_THRESHOLD"])) - files_to_compute.extend(expand("data/processed/data_for_population_model/demographic_features.csv")) - files_to_compute.extend(expand("data/processed/data_for_population_model/targets_{summarised}.csv", - summarised = config["PARAMS_FOR_ANALYSIS"]["SUMMARISED"])) - files_to_compute.extend(expand( - expand("data/processed/data_for_population_model/{{rows_nan_threshold}}|{{cols_nan_threshold}}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{source}_{time_segment}_nancellsratio.csv", - days_before_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_BEFORE_THRESHOLD"], - days_after_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_AFTER_THRESHOLD"], - cols_var_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_VAR_THRESHOLD"], - source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"], - time_segment = config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"]), - zip, - rows_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["ROWS_NAN_THRESHOLD"], - cols_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_NAN_THRESHOLD"])) - files_to_compute.extend(expand( - expand("data/processed/data_for_population_model/{{rows_nan_threshold}}|{{cols_nan_threshold}}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{source}_{time_segment}_{summarised}.csv", - days_before_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_BEFORE_THRESHOLD"], - days_after_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_AFTER_THRESHOLD"], - cols_var_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_VAR_THRESHOLD"], - source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"], - time_segment = config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"], - summarised = config["PARAMS_FOR_ANALYSIS"]["SUMMARISED"]), - zip, - rows_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["ROWS_NAN_THRESHOLD"], - cols_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_NAN_THRESHOLD"])) - files_to_compute.extend(expand( - expand("data/processed/output_population_model/{{rows_nan_threshold}}|{{cols_nan_threshold}}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{source}_{time_segment}_{summarised}_{cv_method}_baseline.csv", - days_before_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_BEFORE_THRESHOLD"], - days_after_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_AFTER_THRESHOLD"], - cols_var_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_VAR_THRESHOLD"], - cv_method = config["PARAMS_FOR_ANALYSIS"]["CV_METHODS"], - source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"], - time_segment = config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"], - summarised = config["PARAMS_FOR_ANALYSIS"]["SUMMARISED"]), - zip, - rows_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["ROWS_NAN_THRESHOLD"], - cols_nan_threshold = config["PARAMS_FOR_ANALYSIS"]["COLS_NAN_THRESHOLD"])) - files_to_compute.extend(expand( - expand("data/processed/output_population_model/{{rows_nan_threshold}}|{{cols_nan_threshold}}_{days_before_threshold}|{days_after_threshold}_{cols_var_threshold}/{{model}}/{cv_method}/{source}_{time_segment}_{summarised}_{{scaler}}/{result}.csv", - days_before_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_BEFORE_THRESHOLD"], - days_after_threshold = config["PARAMS_FOR_ANALYSIS"]["PARTICIPANT_DAYS_AFTER_THRESHOLD"], - cols_var_threshold = 
config["PARAMS_FOR_ANALYSIS"]["COLS_VAR_THRESHOLD"], - cv_method = config["PARAMS_FOR_ANALYSIS"]["CV_METHODS"], - source = config["PARAMS_FOR_ANALYSIS"]["SOURCES"], - time_segment = config["PARAMS_FOR_ANALYSIS"]["TIME_SEGMENTS"], - summarised = config["PARAMS_FOR_ANALYSIS"]["SUMMARISED"], - result = results), - zip, - rows_nan_threshold = rows_nan_thresholds, - cols_nan_threshold = cols_nan_thresholds, - model = models, - scaler = scalers)) -rule all: - input: - files_to_compute - -rule clean: - shell: - "rm -rf data/raw/* && rm -rf data/interim/* && rm -rf data/processed/* && rm -rf reports/figures/* && rm -rf reports/*.zip && rm -rf reports/compliance/*" \ No newline at end of file diff --git a/sn_profile_rapids/config.yaml b/sn_profile_rapids/config.yaml deleted file mode 100644 index a68742c2..00000000 --- a/sn_profile_rapids/config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -configfile: ./sn_profile_rapids/pipeline_config.yaml -directory: ./ -snakefile: ./sn_profile_rapids/Snakefile -cores: 1 -# forcerun: compute_time_segments \ No newline at end of file diff --git a/sn_profile_rapids/pipeline_config.yaml b/sn_profile_rapids/pipeline_config.yaml deleted file mode 100644 index cbacf8e2..00000000 --- a/sn_profile_rapids/pipeline_config.yaml +++ /dev/null @@ -1,8 +0,0 @@ -PIDS: [t01] -DOWNLOAD_DATASET: - GROUP: RAPIDS -BLUETOOTH: - COMPUTE: True - TIME_SEGMENTS: "data/external/timesegments_bluetooth.csv" -WIFI: - COMPUTE: True \ No newline at end of file