From d6b81e48b6ccc303e8f3ceaa184a15012ef5ce10 Mon Sep 17 00:00:00 2001
From: JulioV
Date: Mon, 29 Mar 2021 09:36:17 -0500
Subject: [PATCH] Deployed 67e0caa to 0.2 with MkDocs 1.1.2 and mike 0.5.5

---
 0.2/404.html | 1595 +++++++-------
 0.2/assets/javascripts/bundle.9aafa2c6.min.js | 108 +
 0.2/assets/javascripts/bundle.d371fdb2.min.js | 1 -
 0.2/assets/javascripts/lunr/tinyseg.js | 206 ++
 0.2/assets/javascripts/lunr/tinyseg.min.js | 1 -
 0.2/assets/javascripts/vendor.12f39d2a.min.js | 15 -
 .../javascripts/worker/search.0f64ce30.min.js | 58 -
 .../workers/search.d10a1f1d.min.js | 59 +
 0.2/assets/stylesheets/main.a2a6bca7.min.css | 1 -
 0.2/assets/stylesheets/main.ec3b3678.min.css | 1 +
 .../stylesheets/palette.c308bc62.min.css | 1 -
 .../stylesheets/palette.de2705de.min.css | 1 +
 0.2/change-log/index.html | 1875 +++++++++--------
 0.2/citation/index.html | 1799 ++++++++--------
 0.2/code_of_conduct/index.html | 1813 ++++++++--------
 0.2/developers/documentation/index.html | 1781 +++++++++-------
 0.2/developers/git-flow/index.html | 1773 +++++++++-------
 0.2/developers/remote-support/index.html | 1743 ++++++++-------
 0.2/developers/test-cases/index.html | 1787 +++++++++-------
 0.2/developers/testing/index.html | 1783 +++++++++-------
 .../virtual-environments/index.html | 1761 +++++++++-------
 0.2/faq/index.html | 1815 ++++++++--------
 0.2/features/add-new-features/index.html | 1755 ++++++++-------
 0.2/features/feature-introduction/index.html | 1701 ++++++++-------
 .../fitbit-heartrate-intraday/index.html | 1745 ++++++++-------
 .../fitbit-heartrate-summary/index.html | 1741 ++++++++-------
 0.2/features/fitbit-sleep-summary/index.html | 1747 ++++++++-------
 0.2/features/fitbit-steps-intraday/index.html | 1753 ++++++++-------
 0.2/features/fitbit-steps-summary/index.html | 1749 ++++++++-------
 0.2/features/phone-accelerometer/index.html | 1703 ++++++++-------
 .../phone-activity-recognition/index.html | 1705 ++++++++-------
 .../phone-applications-foreground/index.html | 1707 ++++++++-------
 0.2/features/phone-battery/index.html | 1711 ++++++++-------
 0.2/features/phone-bluetooth/index.html | 1713 ++++++++-------
 0.2/features/phone-calls/index.html | 1715 ++++++++-------
 0.2/features/phone-conversation/index.html | 1719 ++++++++-------
 0.2/features/phone-data-yield/index.html | 1721 ++++++++-------
 0.2/features/phone-light/index.html | 1723 ++++++++-------
 0.2/features/phone-locations/index.html | 1727 ++++++++-------
 0.2/features/phone-messages/index.html | 1729 ++++++++-------
 0.2/features/phone-screen/index.html | 1731 ++++++++-------
 0.2/features/phone-wifi-connected/index.html | 1735 ++++++++-------
 0.2/features/phone-wifi-visible/index.html | 1737 ++++++++-------
 0.2/file-structure/index.html | 1659 ++++++++-------
 0.2/index.html | 1667 ++++++++-------
 0.2/javascripts/extra.js | 14 -
 0.2/migrating-from-old-versions/index.html | 1791 +++++++++-------
 0.2/overrides/main.html | 8 +
 0.2/search/search_index.json | 2 +-
 0.2/setup/configuration/index.html | 1689 ++++++++-------
 0.2/setup/execution/index.html | 1665 ++++++++-------
 0.2/setup/installation/index.html | 1663 ++++++++-------
 0.2/sitemap.xml | 168 +-
 0.2/sitemap.xml.gz | Bin 216 -> 597 bytes
 0.2/team/index.html | 1819 ++++++++--------
 .../data-quality-visualizations/index.html | 1749 ++++++++-------
 .../feature-visualizations/index.html | 1753 ++++++++-------
 0.2/workflow-examples/analysis/index.html | 1687 ++++++++-------
 0.2/workflow-examples/minimal/index.html | 1667 ++++++++-------
 59 files changed, 41169 insertions(+), 34076 deletions(-)
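The subject line is the commit message that mike, the versioning tool for MkDocs, writes when it publishes a site build: it builds the docs from source commit 67e0caa and commits the output under the 0.2/ directory of the pages branch, which matches the file layout in the stat above. A minimal sketch of the commands that would produce a commit like this; the exact invocation, target branch (gh-pages by default), and any aliases are assumptions, not recorded in the patch:

    pip install mkdocs==1.1.2 mike==0.5.5
    mike deploy --push 0.2    # build with MkDocs, commit the site under 0.2/, and push
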
 create mode 100644 0.2/assets/javascripts/bundle.9aafa2c6.min.js
 delete mode 100644 0.2/assets/javascripts/bundle.d371fdb2.min.js
 create mode 100644 0.2/assets/javascripts/lunr/tinyseg.js
 delete mode 100644 0.2/assets/javascripts/lunr/tinyseg.min.js
 delete mode 100644 0.2/assets/javascripts/vendor.12f39d2a.min.js
 delete mode 100644 0.2/assets/javascripts/worker/search.0f64ce30.min.js
 create mode 100644 0.2/assets/javascripts/workers/search.d10a1f1d.min.js
 delete mode 100644 0.2/assets/stylesheets/main.a2a6bca7.min.css
 create mode 100644 0.2/assets/stylesheets/main.ec3b3678.min.css
 delete mode 100644 0.2/assets/stylesheets/palette.c308bc62.min.css
 create mode 100644 0.2/assets/stylesheets/palette.de2705de.min.css
 create mode 100644 0.2/overrides/main.html

diff --git a/0.2/404.html b/0.2/404.html
index a68a9c8f..53354498 100644
--- a/0.2/404.html
+++ b/0.2/404.html
@@ -9,8 +9,11 @@
@@ -18,20 +21,21 @@
@@ -55,7 +59,9 @@
@@ -67,27 +73,44 @@
@@ -981,10 +1142,11 @@
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/assets/javascripts/bundle.9aafa2c6.min.js b/0.2/assets/javascripts/bundle.9aafa2c6.min.js new file mode 100644 index 00000000..4087fccb --- /dev/null +++ b/0.2/assets/javascripts/bundle.9aafa2c6.min.js @@ -0,0 +1,108 @@ +(()=>{var zi=Object.create,Kt=Object.defineProperty,Qi=Object.getPrototypeOf,Er=Object.prototype.hasOwnProperty,qi=Object.getOwnPropertyNames,Ki=Object.getOwnPropertyDescriptor,Or=Object.getOwnPropertySymbols,Bi=Object.prototype.propertyIsEnumerable;var P=Object.assign,Ji=e=>Kt(e,"__esModule",{value:!0});var Tr=(e,t)=>{var n={};for(var r in e)Er.call(e,r)&&t.indexOf(r)<0&&(n[r]=e[r]);if(e!=null&&Or)for(var r of Or(e))t.indexOf(r)<0&&Bi.call(e,r)&&(n[r]=e[r]);return n},dt=(e,t)=>()=>(t||(t={exports:{}},e(t.exports,t)),t.exports);var Yi=(e,t,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let r of qi(t))!Er.call(e,r)&&r!=="default"&&Kt(e,r,{get:()=>t[r],enumerable:!(n=Ki(t,r))||n.enumerable});return e},nt=e=>Yi(Ji(Kt(e!=null?zi(Qi(e)):{},"default",e&&e.__esModule&&"default"in e?{get:()=>e.default,enumerable:!0}:{value:e,enumerable:!0})),e);var Mr=dt((Bt,_r)=>{(function(e,t){typeof Bt=="object"&&typeof _r!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Bt,function(){"use strict";function e(n){var r=!0,o=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(M){return!!(M&&M!==document&&M.nodeName!=="HTML"&&M.nodeName!=="BODY"&&"classList"in M&&"contains"in M.classList)}function c(M){var pt=M.type,ft=M.tagName;return!!(ft==="INPUT"&&a[pt]&&!M.readOnly||ft==="TEXTAREA"&&!M.readOnly||M.isContentEditable)}function l(M){M.classList.contains("focus-visible")||(M.classList.add("focus-visible"),M.setAttribute("data-focus-visible-added",""))}function u(M){!M.hasAttribute("data-focus-visible-added")||(M.classList.remove("focus-visible"),M.removeAttribute("data-focus-visible-added"))}function m(M){M.metaKey||M.altKey||M.ctrlKey||(s(n.activeElement)&&l(n.activeElement),r=!0)}function f(M){r=!1}function d(M){!s(M.target)||(r||c(M.target))&&l(M.target)}function v(M){!s(M.target)||(M.target.classList.contains("focus-visible")||M.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(M.target))}function h(M){document.visibilityState==="hidden"&&(o&&(r=!0),z())}function z(){document.addEventListener("mousemove",j),document.addEventListener("mousedown",j),document.addEventListener("mouseup",j),document.addEventListener("pointermove",j),document.addEventListener("pointerdown",j),document.addEventListener("pointerup",j),document.addEventListener("touchmove",j),document.addEventListener("touchstart",j),document.addEventListener("touchend",j)}function D(){document.removeEventListener("mousemove",j),document.removeEventListener("mousedown",j),document.removeEventListener("mouseup",j),document.removeEventListener("pointermove",j),document.removeEventListener("pointerdown",j),document.removeEventListener("pointerup",j),document.removeEventListener("touchmove",j),document.removeEventListener("touchstart",j),document.removeEventListener("touchend",j)}function 
j(M){M.target.nodeName&&M.target.nodeName.toLowerCase()==="html"||(r=!1,D())}document.addEventListener("keydown",m,!0),document.addEventListener("mousedown",f,!0),document.addEventListener("pointerdown",f,!0),document.addEventListener("touchstart",f,!0),document.addEventListener("visibilitychange",h,!0),z(),n.addEventListener("focus",d,!0),n.addEventListener("blur",v,!0),n.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&n.host?n.host.setAttribute("data-js-focus-visible",""):n.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(n){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Br=dt((rs,ht)=>{var Lr,Ar,Hr,kr,Cr,jr,Ir,Fr,Rr,bt,Jt,$r,Pr,Vr,ze,Dr,Wr,Ur,Nr,zr,Qr,qr,Kr,vt;(function(e){var t=typeof global=="object"?global:typeof self=="object"?self:typeof this=="object"?this:{};typeof define=="function"&&define.amd?define("tslib",["exports"],function(r){e(n(t,n(r)))}):typeof ht=="object"&&typeof ht.exports=="object"?e(n(t,n(ht.exports))):e(n(t));function n(r,o){return r!==t&&(typeof Object.create=="function"?Object.defineProperty(r,"__esModule",{value:!0}):r.__esModule=!0),function(i,a){return r[i]=o?o(i,a):a}}})(function(e){var t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(r,o){r.__proto__=o}||function(r,o){for(var i in o)Object.prototype.hasOwnProperty.call(o,i)&&(r[i]=o[i])};Lr=function(r,o){if(typeof o!="function"&&o!==null)throw new TypeError("Class extends value "+String(o)+" is not a constructor or null");t(r,o);function i(){this.constructor=r}r.prototype=o===null?Object.create(o):(i.prototype=o.prototype,new i)},Ar=Object.assign||function(r){for(var o,i=1,a=arguments.length;i=0;u--)(l=r[u])&&(c=(s<3?l(c):s>3?l(o,i,c):l(o,i))||c);return s>3&&c&&Object.defineProperty(o,i,c),c},Cr=function(r,o){return function(i,a){o(i,a,r)}},jr=function(r,o){if(typeof Reflect=="object"&&typeof Reflect.metadata=="function")return Reflect.metadata(r,o)},Ir=function(r,o,i,a){function s(c){return c instanceof i?c:new i(function(l){l(c)})}return new(i||(i=Promise))(function(c,l){function u(d){try{f(a.next(d))}catch(v){l(v)}}function m(d){try{f(a.throw(d))}catch(v){l(v)}}function f(d){d.done?c(d.value):s(d.value).then(u,m)}f((a=a.apply(r,o||[])).next())})},Fr=function(r,o){var i={label:0,sent:function(){if(c[0]&1)throw c[1];return c[1]},trys:[],ops:[]},a,s,c,l;return l={next:u(0),throw:u(1),return:u(2)},typeof Symbol=="function"&&(l[Symbol.iterator]=function(){return this}),l;function u(f){return function(d){return m([f,d])}}function m(f){if(a)throw new TypeError("Generator is already executing.");for(;i;)try{if(a=1,s&&(c=f[0]&2?s.return:f[0]?s.throw||((c=s.return)&&c.call(s),0):s.next)&&!(c=c.call(s,f[1])).done)return c;switch(s=0,c&&(f=[f[0]&2,c.value]),f[0]){case 0:case 1:c=f;break;case 4:return i.label++,{value:f[1],done:!1};case 5:i.label++,s=f[1],f=[0];continue;case 7:f=i.ops.pop(),i.trys.pop();continue;default:if(c=i.trys,!(c=c.length>0&&c[c.length-1])&&(f[0]===6||f[0]===2)){i=0;continue}if(f[0]===3&&(!c||f[1]>c[0]&&f[1]=r.length&&(r=void 0),{value:r&&r[a++],done:!r}}};throw new TypeError(o?"Object is not iterable.":"Symbol.iterator is not defined.")},Jt=function(r,o){var i=typeof 
Symbol=="function"&&r[Symbol.iterator];if(!i)return r;var a=i.call(r),s,c=[],l;try{for(;(o===void 0||o-- >0)&&!(s=a.next()).done;)c.push(s.value)}catch(u){l={error:u}}finally{try{s&&!s.done&&(i=a.return)&&i.call(a)}finally{if(l)throw l.error}}return c},$r=function(){for(var r=[],o=0;o1||u(h,z)})})}function u(h,z){try{m(a[h](z))}catch(D){v(c[0][3],D)}}function m(h){h.value instanceof ze?Promise.resolve(h.value.v).then(f,d):v(c[0][2],h)}function f(h){u("next",h)}function d(h){u("throw",h)}function v(h,z){h(z),c.shift(),c.length&&u(c[0][0],c[0][1])}},Wr=function(r){var o,i;return o={},a("next"),a("throw",function(s){throw s}),a("return"),o[Symbol.iterator]=function(){return this},o;function a(s,c){o[s]=r[s]?function(l){return(i=!i)?{value:ze(r[s](l)),done:s==="return"}:c?c(l):l}:c}},Ur=function(r){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var o=r[Symbol.asyncIterator],i;return o?o.call(r):(r=typeof bt=="function"?bt(r):r[Symbol.iterator](),i={},a("next"),a("throw"),a("return"),i[Symbol.asyncIterator]=function(){return this},i);function a(c){i[c]=r[c]&&function(l){return new Promise(function(u,m){l=r[c](l),s(u,m,l.done,l.value)})}}function s(c,l,u,m){Promise.resolve(m).then(function(f){c({value:f,done:u})},l)}},Nr=function(r,o){return Object.defineProperty?Object.defineProperty(r,"raw",{value:o}):r.raw=o,r};var n=Object.create?function(r,o){Object.defineProperty(r,"default",{enumerable:!0,value:o})}:function(r,o){r.default=o};zr=function(r){if(r&&r.__esModule)return r;var o={};if(r!=null)for(var i in r)i!=="default"&&Object.prototype.hasOwnProperty.call(r,i)&&vt(o,r,i);return n(o,r),o},Qr=function(r){return r&&r.__esModule?r:{default:r}},qr=function(r,o){if(!o.has(r))throw new TypeError("attempted to get private field on non-instance");return o.get(r)},Kr=function(r,o,i){if(!o.has(r))throw new TypeError("attempted to set private field on non-instance");return o.set(r,i),i},e("__extends",Lr),e("__assign",Ar),e("__rest",Hr),e("__decorate",kr),e("__param",Cr),e("__metadata",jr),e("__awaiter",Ir),e("__generator",Fr),e("__exportStar",Rr),e("__createBinding",vt),e("__values",bt),e("__read",Jt),e("__spread",$r),e("__spreadArrays",Pr),e("__spreadArray",Vr),e("__await",ze),e("__asyncGenerator",Dr),e("__asyncDelegator",Wr),e("__asyncValues",Ur),e("__makeTemplateObject",Nr),e("__importStar",zr),e("__importDefault",Qr),e("__classPrivateFieldGet",qr),e("__classPrivateFieldSet",Kr)})});var fr=dt((ut,pr)=>{(function(t,n){typeof ut=="object"&&typeof pr=="object"?pr.exports=n():typeof define=="function"&&define.amd?define([],n):typeof ut=="object"?ut.ClipboardJS=n():t.ClipboardJS=n()})(ut,function(){return function(){var e={134:function(r,o,i){"use strict";i.d(o,{default:function(){return Ui}});var a=i(279),s=i.n(a),c=i(370),l=i.n(c),u=i(817),m=i.n(u);function f(O){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?f=function(b){return typeof b}:f=function(b){return b&&typeof Symbol=="function"&&b.constructor===Symbol&&b!==Symbol.prototype?"symbol":typeof b},f(O)}function d(O,g){if(!(O instanceof g))throw new TypeError("Cannot call a class as a function")}function v(O,g){for(var b=0;b0&&arguments[0]!==void 0?arguments[0]:{};this.action=b.action,this.container=b.container,this.emitter=b.emitter,this.target=b.target,this.text=b.text,this.trigger=b.trigger,this.selectedText=""}},{key:"initSelection",value:function(){this.text?this.selectFake():this.target&&this.selectTarget()}},{key:"createFakeElement",value:function(){var 
b=document.documentElement.getAttribute("dir")==="rtl";this.fakeElem=document.createElement("textarea"),this.fakeElem.style.fontSize="12pt",this.fakeElem.style.border="0",this.fakeElem.style.padding="0",this.fakeElem.style.margin="0",this.fakeElem.style.position="absolute",this.fakeElem.style[b?"right":"left"]="-9999px";var H=window.pageYOffset||document.documentElement.scrollTop;return this.fakeElem.style.top="".concat(H,"px"),this.fakeElem.setAttribute("readonly",""),this.fakeElem.value=this.text,this.fakeElem}},{key:"selectFake",value:function(){var b=this,H=this.createFakeElement();this.fakeHandlerCallback=function(){return b.removeFake()},this.fakeHandler=this.container.addEventListener("click",this.fakeHandlerCallback)||!0,this.container.appendChild(H),this.selectedText=m()(H),this.copyText(),this.removeFake()}},{key:"removeFake",value:function(){this.fakeHandler&&(this.container.removeEventListener("click",this.fakeHandlerCallback),this.fakeHandler=null,this.fakeHandlerCallback=null),this.fakeElem&&(this.container.removeChild(this.fakeElem),this.fakeElem=null)}},{key:"selectTarget",value:function(){this.selectedText=m()(this.target),this.copyText()}},{key:"copyText",value:function(){var b;try{b=document.execCommand(this.action)}catch(H){b=!1}this.handleResult(b)}},{key:"handleResult",value:function(b){this.emitter.emit(b?"success":"error",{action:this.action,text:this.selectedText,trigger:this.trigger,clearSelection:this.clearSelection.bind(this)})}},{key:"clearSelection",value:function(){this.trigger&&this.trigger.focus(),document.activeElement.blur(),window.getSelection().removeAllRanges()}},{key:"destroy",value:function(){this.removeFake()}},{key:"action",set:function(){var b=arguments.length>0&&arguments[0]!==void 0?arguments[0]:"copy";if(this._action=b,this._action!=="copy"&&this._action!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"')},get:function(){return this._action}},{key:"target",set:function(b){if(b!==void 0)if(b&&f(b)==="object"&&b.nodeType===1){if(this.action==="copy"&&b.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(this.action==="cut"&&(b.hasAttribute("readonly")||b.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`);this._target=b}else throw new Error('Invalid "target" value, use a valid Element')},get:function(){return this._target}}]),O}(),D=z;function j(O){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?j=function(b){return typeof b}:j=function(b){return b&&typeof Symbol=="function"&&b.constructor===Symbol&&b!==Symbol.prototype?"symbol":typeof b},j(O)}function M(O,g){if(!(O instanceof g))throw new TypeError("Cannot call a class as a function")}function pt(O,g){for(var b=0;b0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof $.action=="function"?$.action:this.defaultAction,this.target=typeof $.target=="function"?$.target:this.defaultTarget,this.text=typeof $.text=="function"?$.text:this.defaultText,this.container=j($.container)==="object"?$.container:document.body}},{key:"listenClick",value:function($){var Z=this;this.listener=l()($,"click",function(rt){return Z.onClick(rt)})}},{key:"onClick",value:function($){var Z=$.delegateTarget||$.currentTarget;this.clipboardAction&&(this.clipboardAction=null),this.clipboardAction=new D({action:this.action(Z),target:this.target(Z),text:this.text(Z),container:this.container,trigger:Z,emitter:this})}},{key:"defaultAction",value:function($){return qt("action",$)}},{key:"defaultTarget",value:function($){var Z=qt("target",$);if(Z)return document.querySelector(Z)}},{key:"defaultText",value:function($){return qt("text",$)}},{key:"destroy",value:function(){this.listener.destroy(),this.clipboardAction&&(this.clipboardAction.destroy(),this.clipboardAction=null)}}],[{key:"isSupported",value:function(){var $=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Z=typeof $=="string"?[$]:$,rt=!!document.queryCommandSupported;return Z.forEach(function(Ni){rt=rt&&!!document.queryCommandSupported(Ni)}),rt}}]),b}(s()),Ui=Wi},828:function(r){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,c){for(;s&&s.nodeType!==o;){if(typeof s.matches=="function"&&s.matches(c))return s;s=s.parentNode}}r.exports=a},438:function(r,o,i){var a=i(828);function s(u,m,f,d,v){var h=l.apply(this,arguments);return u.addEventListener(f,h,v),{destroy:function(){u.removeEventListener(f,h,v)}}}function c(u,m,f,d,v){return typeof u.addEventListener=="function"?s.apply(null,arguments):typeof f=="function"?s.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(h){return s(h,m,f,d,v)}))}function l(u,m,f,d){return function(v){v.delegateTarget=a(v.target,m),v.delegateTarget&&d.call(u,v)}}r.exports=c},879:function(r,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(r,o,i){var a=i(879),s=i(438);function c(f,d,v){if(!f&&!d&&!v)throw new Error("Missing required arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(v))throw new TypeError("Third argument must be a Function");if(a.node(f))return l(f,d,v);if(a.nodeList(f))return 
u(f,d,v);if(a.string(f))return m(f,d,v);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function l(f,d,v){return f.addEventListener(d,v),{destroy:function(){f.removeEventListener(d,v)}}}function u(f,d,v){return Array.prototype.forEach.call(f,function(h){h.addEventListener(d,v)}),{destroy:function(){Array.prototype.forEach.call(f,function(h){h.removeEventListener(d,v)})}}}function m(f,d,v){return s(document.body,f,d,v)}r.exports=c},817:function(r){function o(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var c=window.getSelection(),l=document.createRange();l.selectNodeContents(i),c.removeAllRanges(),c.addRange(l),a=c.toString()}return a}r.exports=o},279:function(r){function o(){}o.prototype={on:function(i,a,s){var c=this.e||(this.e={});return(c[i]||(c[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var c=this;function l(){c.off(i,l),a.apply(s,arguments)}return l._=a,this.on(i,l,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),c=0,l=s.length;for(c;c{"use strict";var Da=/["'&<>]/;ci.exports=Wa;function Wa(e){var t=""+e,n=Da.exec(t);if(!n)return t;var r,o="",i=0,a=0;for(i=n.index;i0?e.prototype.requestAsyncId.call(this,n,r,o):(n.actions.push(this),n.scheduled||(n.scheduled=qe.requestAnimationFrame(function(){return n.flush(void 0)})))},t.prototype.recycleAsyncId=function(n,r,o){if(o===void 0&&(o=0),o!=null&&o>0||o==null&&this.delay>0)return e.prototype.recycleAsyncId.call(this,n,r,o);n.actions.length===0&&(qe.cancelAnimationFrame(r),n.scheduled=void 0)},t}(Et);var fn=function(e){J(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(n){this.active=!0,this.scheduled=void 0;var r=this.actions,o,i=-1;n=n||r.shift();var a=r.length;do if(o=n.execute(n.state,n.delay))break;while(++i=2,!0))}function se(e){e=e||{};var t=e.connector,n=t===void 0?function(){return new T}:t,r=e.resetOnComplete,o=r===void 0?!0:r,i=e.resetOnError,a=i===void 0?!0:i,s=e.resetOnRefCountZero,c=s===void 0?!0:s,l=null,u=null,m=0,f=!1,d=!1,v=function(){l=u=null,f=d=!1};return y(function(h,z){return m++,u=u!=null?u:n(),u.subscribe(z),l||(l=_e(h).subscribe({next:function(D){return u.next(D)},error:function(D){d=!0;var j=u;a&&v(),j.error(D)},complete:function(){f=!0;var D=u;o&&v(),D.complete()}})),function(){if(m--,c&&!m&&!d&&!f){var D=l;v(),D==null||D.unsubscribe()}}})}function te(e,t,n){var r,o,i,a=!1;return e&&typeof e=="object"?(i=(r=e.bufferSize)!==null&&r!==void 0?r:Infinity,t=(o=e.windowTime)!==null&&o!==void 0?o:Infinity,a=!!e.refCount,n=e.scheduler):i=e!=null?e:Infinity,se({connector:function(){return new wt(i,t,n)},resetOnError:!0,resetOnComplete:!1,resetOnRefCountZero:a})}function cr(e){return _(function(t,n){return e<=n})}function Fn(e){return y(function(t,n){var r=!1,o=new x(n,function(){o==null||o.unsubscribe(),r=!0},void 0,ne);q(e).subscribe(o),t.subscribe(new x(n,function(i){return r&&n.next(i)}))})}function N(){for(var e=[],t=0;tt==="focus"),N(e===De()))}var Nn=new T,Ea=he(()=>k(new ResizeObserver(e=>{for(let t of e)Nn.next(t)}))).pipe(E(e=>ee.pipe(N(e)).pipe(I(()=>e.disconnect()))),te(1));function Fe(e){return{width:e.offsetWidth,height:e.offsetHeight}}function 
Ft(e){return{width:e.scrollWidth,height:e.scrollHeight}}function Re(e){return Ea.pipe(A(t=>t.observe(e)),E(t=>Nn.pipe(_(({target:n})=>n===e),I(()=>t.unobserve(e)),p(()=>Fe(e)))),N(Fe(e)))}function zn(e){return{x:e.scrollLeft,y:e.scrollTop}}function Oa(e){return V(S(e,"scroll"),S(window,"resize")).pipe(p(()=>zn(e)),N(zn(e)))}function Qn(e,t=16){return Oa(e).pipe(p(({y:n})=>{let r=Fe(e),o=Ft(e);return n>=o.height-r.height-t}),Q())}function qn(e){if(e instanceof HTMLInputElement)e.select();else throw new Error("Not implemented")}var Rt={drawer:ie("[data-md-toggle=drawer]"),search:ie("[data-md-toggle=search]")};function Kn(e){return Rt[e].checked}function $e(e,t){Rt[e].checked!==t&&Rt[e].click()}function $t(e){let t=Rt[e];return S(t,"change").pipe(p(()=>t.checked),N(t.checked))}function Ta(e){switch(e.tagName){case"INPUT":case"SELECT":case"TEXTAREA":return!0;default:return e.isContentEditable}}function Bn(){return S(window,"keydown").pipe(_(e=>!(e.metaKey||e.ctrlKey)),p(e=>({mode:Kn("search")?"search":"global",type:e.key,claim(){e.preventDefault(),e.stopPropagation()}})),_(({mode:e})=>{if(e==="global"){let t=De();if(typeof t!="undefined")return!Ta(t)}return!0}),se())}function we(){return new URL(location.href)}function Jn(e){location.href=e.href}function Yn(){return new T}function Gn(){return location.hash.substring(1)}function Xn(e){let t=Ie("a");t.href=e,t.addEventListener("click",n=>n.stopPropagation()),t.click()}function _a(){return S(window,"hashchange").pipe(p(Gn),N(Gn()),_(e=>e.length>0),se())}function Zn(){return _a().pipe(E(e=>k(ce(`[id="${e}"]`))))}function Xe(e){let t=matchMedia(e);return S(t,"change").pipe(p(n=>n.matches),N(t.matches))}function eo(){return V(Xe("print").pipe(_(Boolean)),S(window,"beforeprint")).pipe(G(void 0))}function lr(e,t){return e.pipe(E(n=>n?t():ee))}function Pt(e,t={credentials:"same-origin"}){return _e(fetch(`${e}`,t)).pipe(_(n=>n.status===200))}function Ee(e,t){return Pt(e,t).pipe(E(n=>n.json()),te(1))}function to(e,t){let n=new DOMParser;return Pt(e,t).pipe(E(r=>r.text()),p(r=>n.parseFromString(r,"text/xml")),te(1))}function ro(e){let t=Ie("script");return t.src=e,he(()=>(document.head.appendChild(t),V(S(t,"load"),S(t,"error").pipe(E(()=>yn(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(G(void 0),I(()=>document.head.removeChild(t)),ve(1))))}function no(){return{x:Math.max(0,pageXOffset),y:Math.max(0,pageYOffset)}}function ur({x:e,y:t}){window.scrollTo(e||0,t||0)}function oo(){return V(S(window,"scroll",{passive:!0}),S(window,"resize",{passive:!0})).pipe(p(no),N(no()))}function io(){return{width:innerWidth,height:innerHeight}}function ao(){return S(window,"resize",{passive:!0}).pipe(p(io),N(io()))}function so(){return B([oo(),ao()]).pipe(p(([e,t])=>({offset:e,size:t})),te(1))}function Vt(e,{viewport$:t,header$:n}){let r=t.pipe(U("size")),o=B([r,n]).pipe(p(()=>({x:e.offsetLeft,y:e.offsetTop})));return B([n,t,o]).pipe(p(([{height:i},{offset:a,size:s},{x:c,y:l}])=>({offset:{x:a.x-c,y:a.y-l+i},size:s})))}function co(e,{tx$:t}){let n=S(e,"message").pipe(p(({data:r})=>r));return t.pipe(Pn(()=>n,{leading:!0,trailing:!0}),A(r=>e.postMessage(r)),Rn(n),se())}var Ma=ie("#__config"),Ze=JSON.parse(Ma.textContent);Ze.base=new URL(Ze.base,we()).toString().replace(/\/$/,"");function pe(){return Ze}function fe(e){return Ze.features.includes(e)}function X(e,t){return typeof t!="undefined"?Ze.translations[e].replace("#",t.toString()):Ze.translations[e]}function Oe(e,t=document){return ie(`[data-md-component=${e}]`,t)}function re(e,t=document){return 
W(`[data-md-component=${e}]`,t)}var Ko=nt(fr());function lo(e,t=0){e.setAttribute("tabindex",t.toString())}function uo(e){e.removeAttribute("tabindex")}function po(e,t){e.setAttribute("data-md-state","lock"),e.style.top=`-${t}px`}function fo(e){let t=-1*parseInt(e.style.top,10);e.removeAttribute("data-md-state"),e.style.top="",t&&window.scrollTo(0,t)}function mo(e,t){e.setAttribute("data-md-state",t)}function ho(e){e.removeAttribute("data-md-state")}function bo(e,t){e.classList.toggle("md-nav__link--active",t)}function vo(e){e.classList.remove("md-nav__link--active")}function go(e,t){e.firstElementChild.innerHTML=t}function yo(e,t){e.setAttribute("data-md-state",t)}function xo(e){e.removeAttribute("data-md-state")}function So(e,t){e.setAttribute("data-md-state",t)}function wo(e){e.removeAttribute("data-md-state")}function Eo(e,t){e.setAttribute("data-md-state",t)}function Oo(e){e.removeAttribute("data-md-state")}function To(e,t){e.placeholder=t}function _o(e){e.placeholder=X("search.placeholder")}function Mo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let n of t)Mo(e,n)}function R(e,t,...n){let r=document.createElement(e);if(t)for(let o of Object.keys(t))typeof t[o]!="boolean"?r.setAttribute(o,t[o]):t[o]&&r.setAttribute(o,"");for(let o of n)Mo(r,o);return r}function Lo(e,t){let n=t;if(e.length>n){for(;e[n]!==" "&&--n>0;);return`${e.substring(0,n)}...`}return e}function Dt(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function Ao(e,t){switch(t){case 0:e.textContent=X("search.result.none");break;case 1:e.textContent=X("search.result.one");break;default:e.textContent=X("search.result.other",Dt(t))}}function Ho(e){e.textContent=X("search.result.placeholder")}function ko(e,t){e.appendChild(t)}function Co(e){e.innerHTML=""}function jo(e,t){e.style.top=`${t}px`}function Io(e){e.style.top=""}function Fo(e,t){let n=e.firstElementChild;n.style.height=`${t-2*n.offsetTop}px`}function Ro(e){let t=e.firstElementChild;t.style.height=""}function $o(e,t){e.lastElementChild.appendChild(t)}function Po(e,t){e.lastElementChild.setAttribute("data-md-state",t)}function Vo(e,t){e.setAttribute("data-md-state",t)}function mr(e){e.removeAttribute("data-md-state")}function Do(e,t){e.setAttribute("data-md-state",t)}function dr(e){e.removeAttribute("data-md-state")}function Wo(e){return R("button",{class:"md-clipboard md-icon",title:X("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function Uo(e,t){return R("aside",{class:"md-annotation",tabIndex:0},R("div",{class:"md-tooltip"},R("div",{class:"md-tooltip__inner md-typeset"},Array.from(t.childNodes))),R("span",{class:"md-annotation__index"},e))}var Ue;(function(e){e[e.TEASER=1]="TEASER",e[e.PARENT=2]="PARENT"})(Ue||(Ue={}));function hr(e,t){let n=t&2,r=t&1,o=Object.keys(e.terms).filter(a=>!e.terms[a]).map(a=>[R("del",null,a)," "]).flat().slice(0,-1),i=new URL(e.location);return fe("search.highlight")&&i.searchParams.set("h",Object.entries(e.terms).filter(([,a])=>a).reduce((a,[s])=>`${a} ${s}`.trim(),"")),R("a",{href:`${i}`,class:"md-search-result__link",tabIndex:-1},R("article",{class:["md-search-result__article",...n?["md-search-result__article--document"]:[]].join(" "),"data-md-score":e.score.toFixed(2)},n>0&&R("div",{class:"md-search-result__icon 
md-icon"}),R("h1",{class:"md-search-result__title"},e.title),r>0&&e.text.length>0&&R("p",{class:"md-search-result__teaser"},Lo(e.text,320)),r>0&&o.length>0&&R("p",{class:"md-search-result__terms"},X("search.result.term.missing"),": ",o)))}function No(e){let t=e[0].score,n=[...e],r=n.findIndex(l=>!l.location.includes("#")),[o]=n.splice(r,1),i=n.findIndex(l=>l.scorehr(l,1)),...s.length?[R("details",{class:"md-search-result__more"},R("summary",{tabIndex:-1},s.length>0&&s.length===1?X("search.result.more.one"):X("search.result.more.other",s.length)),s.map(l=>hr(l,1)))]:[]];return R("li",{class:"md-search-result__item"},c)}function zo(e){return R("ul",{class:"md-source__facts"},Object.entries(e).map(([t,n])=>R("li",{class:`md-source__fact md-source__fact--${t}`},typeof n=="number"?Dt(n):n)))}function Qo(e){return R("div",{class:"md-typeset__scrollwrap"},R("div",{class:"md-typeset__table"},e))}function La(e){let t=pe(),n=new URL(`${e.version}/`,t.base);return R("li",{class:"md-version__item"},R("a",{href:`${n}`,class:"md-version__link"},e.title))}function qo(e,t){return R("div",{class:"md-version"},R("span",{class:"md-version__current"},t.title),R("ul",{class:"md-version__list"},e.map(La)))}var Aa=0;function Ha(e,{viewport$:t}){let n=k(e).pipe(E(i=>{let a=i.closest("[data-tabs]");return a instanceof HTMLElement?V(...W("input",a).map(s=>S(s,"change"))):ee})),r=[],o=e.closest(".annotate.highlighttable")||e.closest(".annotate.highlight");if(o){let i=o.nextElementSibling;if(i instanceof HTMLOListElement){let a=Array.from(i.children);i.remove();for(let s of W(".c, .c1, .cm",e)){let[,c=-1]=s.textContent.match(/\((\d+)\)/)||[],l=a[+c-1];if(typeof l!="undefined"){let u=Uo(+c,l);s.replaceWith(u),r.push(u)}}}}return t.pipe(U("size"),je(n),p(()=>{let i=Fe(e),a=Ft(e);return P({scroll:a.width>i.width},r.length&&{annotations:r})}),U("scroll"))}function Bo(e,t){let n=new T;if(n.pipe(Se(Xe("(hover)"))).subscribe(([{scroll:r},o])=>{r&&o?lo(e):uo(e)}),n.pipe(ve(1),It(({annotations:r})=>!!(r==null?void 0:r.length)),p(({annotations:r})=>r.map(o=>ie(".md-tooltip",o))),Ve(viewport$.pipe(U("size")))).subscribe(([r,{size:o}])=>{for(let i of r){let{x:a,width:s}=i.getBoundingClientRect();a+s>o.width?i.classList.add("md-tooltip--end"):i.classList.remove("md-tooltip--end")}}),Ko.default.isSupported()){let r=e.closest("pre");r.id=`__code_${++Aa}`,r.insertBefore(Wo(r.id),e)}return Ha(e,t).pipe(A(n),I(()=>n.complete()),p(r=>P({ref:e},r)))}var br,ka=0;function Jo(e){return br||(br=ro("https://unpkg.com/mermaid@8.8.4/dist/mermaid.min.js").pipe(A(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Ca})),te(1))),br.subscribe(()=>{let t=e.innerText;mermaid.mermaidAPI.render(`__mermaid_${ka++}`,t,n=>{e.innerHTML=n})}),br.pipe(G({ref:e}))}var Ca=` + rect.actor { + fill: white; + } + .classLabel .box { + background-color: var(--md-mermaid-label-bg-color); + fill: var(--md-mermaid-label-bg-color); + opacity: 1; + } + .classLabel .label { + font-family: var(--md-mermaid-font-family); + fill: var(--md-mermaid-label-fg-color) + } + .statediagram-cluster.statediagram-cluster .inner { + fill: var(--md-default-bg-color); + } + .statediagram-state rect.divider { + stroke: var(--md-default-fg-color--lighter); + fill: var(--md-default-fg-color--lightest); + } + .cluster rect { + stroke: var(--md-default-fg-color--lighter); + fill: var(--md-default-fg-color--lightest); + } + .edgeLabel, + .edgeLabel rect { + background-color: var(--md-mermaid-label-bg-color); + fill: var(--md-mermaid-label-bg-color); + } + .cardinality text { + fill: inherit 
!important; + } + .cardinality, + g.classGroup text { + font-family: var(--md-mermaid-font-family); + fill: var(--md-mermaid-label-fg-color); + } + .edgeLabel .label rect { + fill: transparent; + } + .nodeLabel, + .label, + .label div .edgeLabel { + font-family: var(--md-mermaid-font-family); + color: var(--md-mermaid-label-fg-color); + } + .label foreignObject { + overflow: visible; + } + .arrowheadPath, + marker { + fill: var(--md-mermaid-edge-color) !important; + } + .edgePath .path, + .flowchart-link, + .relation, + .transition { + stroke: var(--md-mermaid-edge-color); + } + .statediagram-cluster rect, + g.classGroup line, + g.classGroup rect, + .node circle, + .node ellipse, + .node path, + .node polygon, + .node rect { + fill: var(--md-mermaid-node-bg-color); + stroke: var(--md-mermaid-node-fg-color); + } + .node circle.state-end { + fill: var(--md-mermaid-label-bg-color); + stroke: none; + } + .node circle.state-start { + fill: var(--md-mermaid-label-fg-color); + stroke: var(--md-mermaid-label-fg-color); + } +`;function ja(e,{target$:t,print$:n}){return t.pipe(p(r=>r.closest("details:not([open])")),_(r=>e===r),je(n),G(e))}function Yo(e,t){let n=new T;return n.subscribe(()=>{e.setAttribute("open",""),e.scrollIntoView()}),ja(e,t).pipe(A(n),I(()=>n.complete()),G({ref:e}))}var Go=Ie("table");function Xo(e){return We(e,Go),We(Go,Qo(e)),k({ref:e})}function Zo(e,{target$:t,viewport$:n,print$:r}){return V(...W("pre:not([class^=mermaid]) > code",e).map(o=>Bo(o,{viewport$:n})),...W(".mermaid-experimental",e).map(o=>Jo(o)),...W("table:not([class])",e).map(o=>Xo(o)),...W("details",e).map(o=>Yo(o,{target$:t,print$:r})))}function Ia(e,{alert$:t}){return t.pipe(E(n=>V(k(!0),k(!1).pipe(Ce(2e3))).pipe(p(r=>({message:n,open:r})))))}function ei(e,t){let n=new T;return n.pipe(K(Y)).subscribe(({message:r,open:o})=>{go(e,r),o?yo(e,"open"):xo(e)}),Ia(e,t).pipe(A(n),I(()=>n.complete()),p(r=>P({ref:e},r)))}function Fa({viewport$:e}){if(!fe("header.autohide"))return k(!1);let t=e.pipe(p(({offset:{y:o}})=>o),xe(2,1),p(([o,i])=>[oMath.abs(i-o.y)>100),p(([,[o]])=>o),Q()),r=$t("search");return B([e,r]).pipe(p(([{offset:o},i])=>o.y>400&&!i),Q(),E(o=>o?n:k(!1)),N(!1))}function ti(e,t){return he(()=>{let n=getComputedStyle(e);return k(n.position==="sticky"||n.position==="-webkit-sticky")}).pipe(Ve(Re(e),Fa(t)),p(([n,{height:r},o])=>({height:n?r:0,sticky:n,hidden:o})),Q((n,r)=>n.sticky===r.sticky&&n.height===r.height&&n.hidden===r.hidden),te(1))}function ri(e,{header$:t,main$:n}){let r=new T;return r.pipe(U("active"),Ve(t),K(Y)).subscribe(([{active:o},{hidden:i}])=>{o?So(e,i?"hidden":"shadow"):wo(e)}),n.subscribe(o=>r.next(o)),t.pipe(p(o=>P({ref:e},o)))}function Ra(e,{viewport$:t,header$:n}){return Vt(e,{header$:n,viewport$:t}).pipe(p(({offset:{y:r}})=>{let{height:o}=Fe(e);return{active:r>=o}}),U("active"))}function ni(e,t){let n=new T;n.pipe(K(Y)).subscribe(({active:o})=>{o?Eo(e,"active"):Oo(e)});let r=ce("article h1");return typeof r=="undefined"?ee:Ra(r,t).pipe(A(n),I(()=>n.complete()),p(o=>P({ref:e},o)))}function oi(e,{viewport$:t,header$:n}){let r=n.pipe(p(({height:i})=>i),Q()),o=r.pipe(E(()=>Re(e).pipe(p(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),U("bottom"))));return B([r,o,t]).pipe(p(([i,{top:a,bottom:s},{offset:{y:c},size:{height:l}}])=>(l=Math.max(0,l-Math.max(0,a-c,i)-Math.max(0,l+c-s)),{offset:a-i,height:l,active:a-i<=c})),Q((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function $a(e){let 
t=localStorage.getItem(__prefix("__palette")),n=JSON.parse(t)||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=k(...e).pipe(oe(o=>S(o,"change").pipe(G(o))),N(e[Math.max(0,n.index)]),p(o=>({index:e.indexOf(o),color:{scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),te(1));return r.subscribe(o=>{localStorage.setItem(__prefix("__palette"),JSON.stringify(o))}),r}function ii(e){let t=new T;t.subscribe(r=>{for(let[o,i]of Object.entries(r.color))typeof i=="string"&&document.body.setAttribute(`data-md-color-${o}`,i);for(let o=0;ot.complete()),p(r=>P({ref:e},r)))}var vr=nt(fr());function Pa(e){let t=W(".md-annotation",e);for(let r of t)r.hidden=!0;let n=e.innerText;for(let r of t)r.hidden=!1;return n}function ai({alert$:e}){vr.default.isSupported()&&new L(t=>{new vr.default("[data-clipboard-target], [data-clipboard-text]",{text:n=>n.getAttribute("data-clipboard-text")||Pa(ie(n.getAttribute("data-clipboard-target")))}).on("success",n=>t.next(n))}).subscribe(()=>e.next(X("clipboard.copied")))}function Va(e){if(e.length<2)return e;let[t,n]=e.sort((i,a)=>i.length-a.length).map(i=>i.replace(/[^/]+$/,"")),r=0;if(t===n)r=t.length;else for(;t.charCodeAt(r)===n.charCodeAt(r);)r++;let o=pe();return e.map(i=>i.replace(t.slice(0,r),`${o.base}/`))}function si({document$:e,location$:t,viewport$:n}){let r=pe();if(location.protocol==="file:")return;"scrollRestoration"in history&&(history.scrollRestoration="manual",S(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}));let o=ce("link[rel=icon]");typeof o!="undefined"&&(o.href=o.href);let i=to(`${r.base}/sitemap.xml`).pipe(p(l=>Va(W("loc",l).map(u=>u.textContent))),E(l=>S(document.body,"click").pipe(_(u=>!u.metaKey&&!u.ctrlKey),E(u=>{if(u.target instanceof Element){let m=u.target.closest("a");if(m&&!m.target&&l.includes(m.href))return u.preventDefault(),k({url:new URL(m.href)})}return ee}))),se()),a=S(window,"popstate").pipe(_(l=>l.state!==null),p(l=>({url:new URL(location.href),offset:l.state})),se());V(i,a).pipe(Q((l,u)=>l.url.href===u.url.href),p(({url:l})=>l)).subscribe(t);let s=t.pipe(U("pathname"),E(l=>Pt(l.href).pipe(ct(()=>(Jn(l),ee)))),se());i.pipe(lt(s)).subscribe(({url:l})=>{history.pushState({},"",`${l}`)});let c=new DOMParser;s.pipe(E(l=>l.text()),p(l=>c.parseFromString(l,"text/html"))).subscribe(e),V(i,a).pipe(lt(e)).subscribe(({url:l,offset:u})=>{l.hash&&!u?Xn(l.hash):ur(u||{y:0})}),e.pipe(cr(1)).subscribe(l=>{for(let u of["title","link[rel=canonical]","meta[name=author]","meta[name=description]","[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=logo], .md-logo","[data-md-component=skip]"]){let m=ce(u),f=ce(u,l);typeof m!="undefined"&&typeof f!="undefined"&&We(m,f)}}),e.pipe(cr(1),p(()=>Oe("container")),E(l=>k(...W("script",l))),An(l=>{let u=Ie("script");if(l.src){for(let m of l.getAttributeNames())u.setAttribute(m,l.getAttribute(m));return We(l,u),new L(m=>{u.onload=()=>m.complete()})}else return u.textContent=l.textContent,We(l,u),ge})).subscribe(),n.pipe(Fn(i),Hn(250),U("offset")).subscribe(({offset:l})=>{history.replaceState(l,"")}),V(i,a).pipe(xe(2,1),_(([l,u])=>l.url.pathname===u.url.pathname),p(([,l])=>l)).subscribe(({offset:l})=>{ur(l||{y:0})})}var Ua=nt(li());function gr(e){let t=new RegExp(e.separator,"img"),n=(r,o,i)=>`${o}${i}`;return r=>{r=r.replace(/[\s*+\-:~^]+/g," ").trim();let o=new 
RegExp(`(^|${e.separator})(${r.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(t,"|")})`,"img");return i=>i.replace(o,n).replace(/<\/mark>(\s+)]*>/img,"$1")}}function ui(e){return e.split(/"([^"]+)"/g).map((t,n)=>n&1?t.replace(/^\b|^(?![^\x00-\x7F]|$)|\s+/g," +"):t).join("").replace(/"|(?:^|\s+)[*+\-:^~]+(?=\s+|$)/g,"").trim()}var Le;(function(e){e[e.SETUP=0]="SETUP",e[e.READY=1]="READY",e[e.QUERY=2]="QUERY",e[e.RESULT=3]="RESULT"})(Le||(Le={}));function Wt(e){return e.type===1}function pi(e){return e.type===2}function et(e){return e.type===3}function Na({config:e,docs:t,index:n}){e.lang.length===1&&e.lang[0]==="en"&&(e.lang=[X("search.config.lang")]),e.separator==="[\\s\\-]+"&&(e.separator=X("search.config.separator"));let o={pipeline:X("search.config.pipeline").split(/\s*,\s*/).filter(Boolean),suggestions:fe("search.suggest")};return{config:e,docs:t,index:n,options:o}}function fi(e,t){let n=pe(),r=new Worker(e),o=new T,i=co(r,{tx$:o}).pipe(p(a=>{if(et(a))for(let s of a.data.items)for(let c of s)c.location=`${n.base}/${c.location}`;return a}),se());return _e(t).pipe(p(a=>({type:Le.SETUP,data:Na(a)}))).subscribe(o.next.bind(o)),{tx$:o,rx$:i}}function mi(){let e=pe();Ee(new URL("versions.json",e.base)).subscribe(t=>{var i;let[,n]=e.base.match(/([^/]+)\/?$/),r=t.find(({version:a,aliases:s})=>a===n||s.includes(n))||t[0];if(ie(".md-header__topic").appendChild(qo(t,r)),!sessionStorage.getItem(__prefix("__outdated"))){let a=((i=e.version)==null?void 0:i.default)||"latest",s=!r.aliases.includes(a);if(sessionStorage.setItem(__prefix("__outdated"),JSON.stringify(s)),s)for(let c of re("outdated"))c.hidden=!1}})}function za(e,{rx$:t}){let n=(__search==null?void 0:__search.transform)||ui,r=Un(e),o=V(S(e,"keyup"),S(e,"focus").pipe(Ce(1))).pipe(p(()=>n(e.value)),Q()),i=we();return i.searchParams.has("q")&&($e("search",!0),t.pipe(_(Wt),ve(1)).subscribe(()=>{e.value=i.searchParams.get("q"),Me(e)})),B([o,r]).pipe(p(([a,s])=>({value:a,focus:s})))}function di(e,{tx$:t,rx$:n}){let r=new T;return r.pipe(U("value"),p(({value:o})=>({type:Le.QUERY,data:o}))).subscribe(t.next.bind(t)),r.pipe(U("focus")).subscribe(({focus:o})=>{o?($e("search",o),To(e,"")):_o(e)}),S(e.form,"reset").pipe($n(r.pipe(Cn(1)))).subscribe(()=>Me(e)),za(e,{tx$:t,rx$:n}).pipe(A(r),I(()=>r.complete()),p(o=>P({ref:e},o)))}function hi(e,{rx$:t},{query$:n}){let r=new T,o=Qn(e.parentElement).pipe(_(Boolean)),i=ie(":scope > :first-child",e);r.pipe(K(Y),Se(n)).subscribe(([{items:c},{value:l}])=>{l?Ao(i,c.length):Ho(i)});let a=ie(":scope > :last-child",e);return r.pipe(K(Y),A(()=>Co(a)),E(({items:c})=>V(k(...c.slice(0,10)),k(...c.slice(10)).pipe(xe(4),Dn(o),E(([l])=>k(...l)))))).subscribe(c=>{ko(a,No(c))}),t.pipe(_(et),p(({data:c})=>c)).pipe(A(r),I(()=>r.complete()),p(c=>P({ref:e},c)))}function Qa(e,{query$:t}){return t.pipe(p(({value:n})=>{let r=we();return r.searchParams.delete("h"),r.searchParams.set("q",n),{url:r}}))}function bi(e,t){let n=new T;return n.subscribe(({url:r})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${r}`}),S(e,"click").subscribe(r=>r.preventDefault()),Qa(e,t).pipe(A(n),I(()=>n.complete()),p(r=>P({ref:e},r)))}function vi(e,{rx$:t},{keyboard$:n}){let r=new T,o=Oe("search-query"),i=S(o,"keydown").pipe(K(Pe),p(()=>o.value),Q());return r.pipe(Ve(i),p(([{suggestions:s},c])=>{let l=c.split(/([\s-]+)/);if((s==null?void 0:s.length)&&l[l.length-1]){let u=s[s.length-1];u.startsWith(l[l.length-1])&&(l[l.length-1]=u)}else l.length=0;return l})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," 
")),n.pipe(_(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&o.selectionStart===o.value.length&&(o.value=e.innerText);break}}),t.pipe(_(et),p(({data:s})=>s)).pipe(A(r),I(()=>r.complete()),p(()=>({ref:e})))}function gi(e,{index$:t,keyboard$:n}){let r=pe(),o=fi(r.search,t),i=Oe("search-query",e),a=Oe("search-result",e),{tx$:s,rx$:c}=o;s.pipe(_(pi),lt(c.pipe(_(Wt),ve(1)))).subscribe(s.next.bind(s)),n.pipe(_(({mode:m})=>m==="search")).subscribe(m=>{let f=De();switch(m.type){case"Enter":if(f===i){let d=new Map;for(let v of W(":first-child [href]",a)){let h=v.firstElementChild;d.set(v,parseFloat(h.getAttribute("data-md-score")))}if(d.size){let[[v]]=[...d].sort(([,h],[,z])=>z-h);v.click()}m.claim()}break;case"Escape":case"Tab":$e("search",!1),Me(i,!1);break;case"ArrowUp":case"ArrowDown":if(typeof f=="undefined")Me(i);else{let d=[i,...W(":not(details) > [href], summary, details[open] [href]",a)],v=Math.max(0,(Math.max(0,d.indexOf(f))+d.length+(m.type==="ArrowUp"?-1:1))%d.length);Me(d[v])}m.claim();break;default:i!==De()&&Me(i)}}),n.pipe(_(({mode:m})=>m==="global")).subscribe(m=>{switch(m.type){case"f":case"s":case"/":Me(i),qn(i),m.claim();break}});let l=di(i,o),u=hi(a,o,{query$:l});return V(l,u).pipe(je(...re("search-share",e).map(m=>bi(m,{query$:l})),...re("search-suggest",e).map(m=>vi(m,o,{keyboard$:n}))))}function yi(e,{index$:t,location$:n}){return B([t,n.pipe(N(we()),_(r=>r.searchParams.has("h")))]).pipe(p(([r,o])=>gr(r.config)(o.searchParams.get("h"))),p(r=>{var a;let o=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)==null?void 0:a.offsetHeight){let c=s.textContent,l=r(c);l.length>c.length&&o.set(s,l)}for(let[s,c]of o){let{childNodes:l}=R("span",null,c);s.replaceWith(...Array.from(l))}return{ref:e,nodes:o}}))}function qa(e,{viewport$:t,main$:n}){let r=e.parentElement.offsetTop-e.parentElement.parentElement.offsetTop;return B([n,t]).pipe(p(([{offset:o,height:i},{offset:{y:a}}])=>(i=i+Math.min(r,Math.max(0,a-o))-r,{height:i,locked:a>=o+r})),Q((o,i)=>o.height===i.height&&o.locked===i.locked))}function yr(e,r){var{header$:t}=r,n=Tr(r,["header$"]);let o=new T;return o.pipe(K(Y),Se(t)).subscribe({next([{height:i},{height:a}]){Fo(e,i),jo(e,a)},complete(){Io(e),Ro(e)}}),qa(e,n).pipe(A(o),I(()=>o.complete()),p(i=>P({ref:e},i)))}function xi(e,t){if(typeof t!="undefined"){let n=`https://api.github.com/repos/${e}/${t}`;return jt(Ee(`${n}/releases/latest`).pipe(p(r=>({version:r.tag_name})),Ge({})),Ee(n).pipe(p(r=>({stars:r.stargazers_count,forks:r.forks_count})),Ge({}))).pipe(p(([r,o])=>P(P({},r),o)))}else{let n=`https://api.github.com/repos/${e}`;return Ee(n).pipe(p(r=>({repositories:r.public_repos})),Ge({}))}}function Si(e,t){let n=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ee(n).pipe(p(({star_count:r,forks_count:o})=>({stars:r,forks:o})),Ge({}))}function wi(e){let[t]=e.match(/(git(?:hub|lab))/i)||[];switch(t.toLowerCase()){case"github":let[,n,r]=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);return xi(n,r);case"gitlab":let[,o,i]=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i);return Si(o,i);default:return ee}}var Ka;function Ba(e){return Ka||(Ka=he(()=>{let t=sessionStorage.getItem(__prefix("__source"));if(t)return k(JSON.parse(t));{let n=wi(e.href);return n.subscribe(r=>{try{sessionStorage.setItem(__prefix("__source"),JSON.stringify(r))}catch(o){}}),n}}).pipe(ct(()=>ee),_(t=>Object.keys(t).length>0),p(t=>({facts:t})),te(1)))}function Ei(e){let t=new T;return 
t.subscribe(({facts:n})=>{$o(e,zo(n)),Po(e,"done")}),Ba(e).pipe(A(t),I(()=>t.complete()),p(n=>P({ref:e},n)))}function Ja(e,{viewport$:t,header$:n}){return Re(document.body).pipe(E(()=>Vt(e,{header$:n,viewport$:t})),p(({offset:{y:r}})=>({hidden:r>=10})),U("hidden"))}function Oi(e,t){let n=new T;return n.pipe(K(Y)).subscribe({next({hidden:r}){r?Vo(e,"hidden"):mr(e)},complete(){mr(e)}}),(fe("navigation.tabs.sticky")?k({hidden:!1}):Ja(e,t)).pipe(A(n),I(()=>n.complete()),p(r=>P({ref:e},r)))}function Ya(e,{viewport$:t,header$:n}){let r=new Map,o=W("[href^=\\#]",e);for(let s of o){let c=decodeURIComponent(s.hash.substring(1)),l=ce(`[id="${c}"]`);typeof l!="undefined"&&r.set(s,l)}let i=n.pipe(p(s=>24+s.height));return Re(document.body).pipe(U("height"),E(s=>he(()=>{let c=[];return k([...r].reduce((l,[u,m])=>{for(;c.length&&r.get(c[c.length-1]).tagName>=m.tagName;)c.pop();let f=m.offsetTop;for(;!f&&m.parentElement;)m=m.parentElement,f=m.offsetTop;return l.set([...c=[...c,u]].reverse(),f)},new Map))}).pipe(p(c=>new Map([...c].sort(([,l],[,u])=>l-u))),E(c=>B([t,i]).pipe(In(([l,u],[{offset:{y:m},size:f},d])=>{let v=m+f.height>=Math.floor(s.height);for(;u.length;){let[,h]=u[0];if(h-d=m&&!v)u=[l.pop(),...u];else break}return[l,u]},[[],[...c]]),Q((l,u)=>l[0]===u[0]&&l[1]===u[1])))))).pipe(p(([s,c])=>({prev:s.map(([l])=>l),next:c.map(([l])=>l)})),N({prev:[],next:[]}),xe(2,1),p(([s,c])=>s.prev.length{for(let[i]of o)vo(i),ho(i);for(let[i,[a]]of r.entries())bo(a,i===r.length-1),mo(a,"blur");if(fe("navigation.tracking")){let i=we(),a=r[r.length-1];if(a&&a.length){let[s]=a,{hash:c}=new URL(s.href);i.hash!==c&&(i.hash=c,history.replaceState({},"",`${i}`))}else i.hash="",history.replaceState({},"",`${i}`)}}),Ya(e,t).pipe(A(n),I(()=>n.complete()),p(r=>P({ref:e},r)))}function Ga(e,{viewport$:t,main$:n}){let r=t.pipe(p(({offset:{y:i}})=>i),xe(2,1),p(([i,a])=>i>a),Q()),o=n.pipe(U("active"));return B([o,r]).pipe(p(([{active:i},a])=>({hidden:!(i&&a)})),Q((i,a)=>i.hidden===a.hidden))}function _i(e,t){let n=new T;return n.pipe(K(Y)).subscribe({next({hidden:r}){r?Do(e,"hidden"):dr(e)},complete(){dr(e)}}),Ga(e,t).pipe(A(n),I(()=>n.complete()),p(r=>P({ref:e},r)))}function Mi({document$:e,tablet$:t}){e.pipe(E(()=>k(...W("[data-md-state=indeterminate]"))),A(n=>{n.indeterminate=!0,n.checked=!1}),oe(n=>S(n,"change").pipe(It(()=>n.hasAttribute("data-md-state")),G(n))),Se(t)).subscribe(([n,r])=>{n.removeAttribute("data-md-state"),r&&(n.checked=!1)})}function Xa(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Li({document$:e}){e.pipe(E(()=>k(...W("[data-md-scrollfix]"))),A(t=>t.removeAttribute("data-md-scrollfix")),_(Xa),oe(t=>S(t,"touchstart").pipe(G(t)))).subscribe(t=>{let n=t.scrollTop;n===0?t.scrollTop=1:n+t.offsetHeight===t.scrollHeight&&(t.scrollTop=n-1)})}function Ai({viewport$:e,tablet$:t}){B([$t("search"),t]).pipe(p(([n,r])=>n&&!r),E(n=>k(n).pipe(Ce(n?400:100),K(Y))),Se(e)).subscribe(([n,{offset:{y:r}}])=>{n?po(document.body,r):fo(document.body)})}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var tt=Wn(),Ut=Yn(),xr=Zn(),Sr=Bn(),me=so(),Nt=Xe("(min-width: 960px)"),Hi=Xe("(min-width: 1220px)"),ki=eo(),Ci=pe(),ji=document.forms.namedItem("search")?(__search==null?void 0:__search.index)||Ee(`${Ci.base}/search/search_index.json`):ee,wr=new T;ai({alert$:wr});fe("navigation.instant")&&si({document$:tt,location$:Ut,viewport$:me});var Ii;((Ii=Ci.version)==null?void 
0:Ii.provider)==="mike"&&mi();V(Ut,xr).pipe(Ce(125)).subscribe(()=>{$e("drawer",!1),$e("search",!1)});Sr.pipe(_(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=ce("[href][rel=prev]");typeof t!="undefined"&&t.click();break;case"n":case".":let n=ce("[href][rel=next]");typeof n!="undefined"&&n.click();break}});Mi({document$:tt,tablet$:Nt});Li({document$:tt});Ai({viewport$:me,tablet$:Nt});var Ne=ti(Oe("header"),{viewport$:me}),zt=tt.pipe(p(()=>Oe("main")),E(e=>oi(e,{viewport$:me,header$:Ne})),te(1)),Za=V(...re("dialog").map(e=>ei(e,{alert$:wr})),...re("header").map(e=>ri(e,{viewport$:me,header$:Ne,main$:zt})),...re("palette").map(e=>ii(e)),...re("search").map(e=>gi(e,{index$:ji,keyboard$:Sr})),...re("source").map(e=>Ei(e))),es=he(()=>V(...re("content").map(e=>Zo(e,{target$:xr,viewport$:me,print$:ki})),...re("content").map(e=>fe("search.highlight")?yi(e,{index$:ji,location$:Ut}):ee),...re("header-title").map(e=>ni(e,{viewport$:me,header$:Ne})),...re("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?lr(Hi,()=>yr(e,{viewport$:me,header$:Ne,main$:zt})):lr(Nt,()=>yr(e,{viewport$:me,header$:Ne,main$:zt}))),...re("tabs").map(e=>Oi(e,{viewport$:me,header$:Ne})),...re("toc").map(e=>Ti(e,{viewport$:me,header$:Ne})),...re("top").map(e=>_i(e,{viewport$:me,main$:zt})))),Fi=tt.pipe(E(()=>es),je(Za),te(1));Fi.subscribe();window.document$=tt;window.location$=Ut;window.target$=xr;window.keyboard$=Sr;window.viewport$=me;window.tablet$=Nt;window.screen$=Hi;window.print$=ki;window.alert$=wr;window.component$=Fi;})(); +/*! + * clipboard.js v2.0.8 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */ +/*! ***************************************************************************** +Copyright (c) Microsoft Corporation. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. 
+***************************************************************************** */ diff --git a/0.2/assets/javascripts/bundle.d371fdb2.min.js b/0.2/assets/javascripts/bundle.d371fdb2.min.js deleted file mode 100644 index 48ceea7d..00000000 --- a/0.2/assets/javascripts/bundle.d371fdb2.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(e,t){for(var c in t)e[c]=t[c]}(window,function(e){function t(t){for(var a,o,i=t[0],s=t[1],b=t[2],p=0,l=[];pObject(o.a)(new H.a(e=>{for(const t of e)q.next(t)}))).pipe(Object(f.a)(e=>Object(p.a)(Object(o.a)(e),b.a).pipe(Object(U.a)(()=>e.disconnect()))),Object(l.a)({bufferSize:1,refCount:!0}));function W(e){return I.pipe(Object(g.a)(t=>t.observe(e)),Object(f.a)(t=>q.pipe(Object($.a)(({target:t})=>t===e),Object(U.a)(()=>t.unobserve(e)),Object(j.a)(({contentRect:e})=>({width:e.width,height:e.height})))),Object(R.a)(function(e){return{width:e.offsetWidth,height:e.offsetHeight}}(e)))}var D=c(73);var F=c(60);function J(e,t=location){return e.host===t.host&&/^(?:\/[\w-]+)*(?:\/?|\.html)$/i.test(e.pathname)}function Y(e,t=location){return e.pathname===t.pathname&&e.hash.length>0}function K(){return new F.a(new URL(location.href))}function B(){return location.hash.substring(1)}function Q(e){const t=M("a");t.href=e,t.addEventListener("click",e=>e.stopPropagation()),t.click()}var X=c(5);function V(e){const t=matchMedia(e);return new X.a(e=>{t.addListener(t=>e.next(t.matches))}).pipe(Object(R.a)(t.matches),Object(l.a)({bufferSize:1,refCount:!0}))}const G={drawer:T("[data-md-toggle=drawer]"),search:T("[data-md-toggle=search]")};function Z(e,t){G[e].checked!==t&&G[e].click()}function ee(e){const t=G[e];return Object(i.a)(t,"change").pipe(Object(j.a)(()=>t.checked),Object(R.a)(t.checked))}function te(){return{x:Math.max(0,pageXOffset),y:Math.max(0,pageYOffset)}}function ce({x:e,y:t}){window.scrollTo(e||0,t||0)}function ae(){return{width:innerWidth,height:innerHeight}}function ne(e,{header$:t,viewport$:c}){const a=c.pipe(Object(x.a)("size")),n=Object(r.a)([a,t]).pipe(Object(j.a)(()=>({x:e.offsetLeft,y:e.offsetTop})));return Object(r.a)([t,c,n]).pipe(Object(j.a)(([{height:e},{offset:t,size:c},{x:a,y:n}])=>({offset:{x:t.x-a,y:t.y-n+e},size:c})))}var re=c(62),oe=c(63);var ie=c(12),se=c(76);let be;function ue(e){return be.pipe(Object(f.a)(t=>void 0!==t[e]?Object(o.a)(t[e]):ie.a),Object(O.a)())}var pe=c(27),le=c(65);function fe({document$:e,viewport$:t}){return Object(pe.a)(Object(f.a)(c=>{const a=function(e,{document$:t}){return t.pipe(Object(j.a)(()=>{const t=getComputedStyle(e);return["sticky","-webkit-sticky"].includes(t.position)}),Object(O.a)(),Object(f.a)(t=>t?W(e).pipe(Object(j.a)(({height:e})=>({sticky:!0,height:e}))):Object(o.a)({sticky:!1,height:0})),Object(l.a)({bufferSize:1,refCount:!0}))}(c,{document$:e}),n=ue("main").pipe(Object(j.a)(e=>E("h1, h2, h3, h4, h5, h6",e)),Object($.a)(e=>void 0!==e),Object(le.a)(ue("header-title")),Object(f.a)(([e,c])=>ne(e,{header$:a,viewport$:t}).pipe(Object(j.a)(({offset:{y:t}})=>t>=e.offsetHeight?"page":"site"),Object(O.a)(),function(e){return Object(pe.a)(Object(h.a)(u.a),Object(g.a)(t=>{!function(e,t){e.setAttribute("data-md-state",t?"active":"")}(e,"page"===t)}),Object(U.a)(()=>{!function(e){e.removeAttribute("data-md-state")}(e)}))}(c))),Object(R.a)("site"));return Object(r.a)([a,n]).pipe(Object(j.a)(([e,t])=>Object.assign({type:t},e)))}))}var de=c(10);function he({header$:e,viewport$:t}){const c=new N.a;return ue("header").pipe(Object(f.a)(e=>{return 
c.pipe(Object(x.a)("active"),(t=e,Object(pe.a)(Object(h.a)(u.a),Object(g.a)(({active:e})=>{!function(e,t){e.setAttribute("data-md-state",t?"shadow":"")}(t,e)}),Object(U.a)(()=>{!function(e){e.removeAttribute("data-md-state")}(t)}))));var t})).subscribe(de.a),Object(pe.a)(Object(f.a)(c=>function(e,{header$:t,viewport$:c}){const a=t.pipe(Object(j.a)(({height:e})=>e),Object(O.a)()),n=a.pipe(Object(f.a)(()=>W(e).pipe(Object(j.a)(({height:t})=>({top:e.offsetTop,bottom:e.offsetTop+t})),Object(x.a)("bottom"))));return Object(r.a)([a,n,c]).pipe(Object(j.a)(([e,{top:t,bottom:c},{offset:{y:a},size:{height:n}}])=>({offset:t-e,height:n=Math.max(0,n-Math.max(0,t-a,e)-Math.max(0,n+a-c)),active:t-e<=a})),Object(O.a)((e,t)=>e.offset===t.offset&&e.height===t.height&&e.active===t.active))}(c,{header$:e,viewport$:t})),Object(g.a)(e=>c.next(e)),Object(U.a)(()=>c.complete()))}function je(e){e.style.top=""}function Oe(e,{main$:t,viewport$:c}){const a=e.parentElement.offsetTop-e.parentElement.parentElement.offsetTop;return Object(r.a)([t,c]).pipe(Object(j.a)(([{offset:e,height:t},{offset:{y:c}}])=>({height:t=t+Math.min(a,Math.max(0,c-e))-a,lock:c>=e+a})),Object(O.a)((e,t)=>e.height===t.height&&e.lock===t.lock))}function me(e,{header$:t}){return Object(pe.a)(Object(h.a)(u.a),Object(d.a)(t),Object(g.a)(([{height:t,lock:c},{height:a}])=>{!function(e,t){const c=e.firstElementChild;c.style.height=t-2*c.offsetTop+"px"}(e,t),c?function(e,t){e.style.top=t+"px"}(e,a):je(e)}),Object(j.a)(([e])=>e),Object(U.a)(()=>{je(e),function(e){e.firstElementChild.style.height=""}(e)}))}var ge=c(67);c(43);function ve(e){const t=new RegExp(e.separator,"img"),c=(e,t,c)=>`${t}${c}`;return a=>{a=a.replace(/[\s*+\-:~^]+/g," ").trim();const n=new RegExp(`(^|${e.separator})(${a.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(t,"|")})`,"img");return e=>e.replace(n,c).replace(/<\/mark>(\s+)]*>/gim,"$1")}}function $e(e){return e.split(/"([^"]+)"/g).map((e,t)=>1&t?e.replace(/^\b|^(?![^\x00-\x7F]|$)|\s+/g," +"):e).join("").replace(/"|(?:^|\s+)[*+\-:^~]+(?=\s+|$)/g,"").trim()}function ye(e,t){if("string"==typeof t||"number"==typeof t)e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(const c of t)ye(e,c)}function we(e,t,...c){const a=document.createElement(e);if(t)for(const e of Object.keys(t))"boolean"!=typeof t[e]?a.setAttribute(e,t[e]):t[e]&&a.setAttribute(e,"");for(const e of c)ye(a,e);return a}let xe;function Se(e,t){if(void 0===xe){const e=T("#__lang");xe=JSON.parse(e.textContent)}if(void 0===xe[e])throw new ReferenceError("Invalid translation: "+e);return void 0!==t?xe[e].replace("#",t.toString()):xe[e]}function _e(e){if(e>999){return((e+1e-6)/1e3).toFixed(+((e-950)%1e3>99))+"k"}return e.toString()}var ke;function Ee(e){return e.type===ke.READY}function Te(e){return e.type===ke.QUERY}function Ae(e){return e.type===ke.RESULT}function Ce({config:e,docs:t,index:c,options:a}){1===e.lang.length&&"en"===e.lang[0]&&(e.lang=[Se("search.config.lang")]),"[\\s\\-]+"===e.separator&&(e.separator=Se("search.config.separator"));const n=Se("search.config.pipeline").split(/\s*,\s*/).filter(Boolean);return{config:e,docs:t,index:c,options:Object.assign(Object.assign({},a),{pipeline:n,suggestions:!0})}}function Me(e,{index$:t,base$:c}){const a=new Worker(e),n=new N.a,r=function(e,{tx$:t}){const c=Object(i.a)(e,"message").pipe(Object(j.a)(({data:e})=>e));return 
t.pipe(Object(re.a)(()=>c,{leading:!0,trailing:!0}),Object(g.a)(t=>e.postMessage(t)),Object(oe.a)(c),Object(D.a)())}(a,{tx$:n}).pipe(Object(d.a)(c),Object(j.a)(([e,t])=>{if(Ae(e))for(const c of e.data.items)for(const e of c)e.location=`${t}/${e.location}`;return e}),Object(D.a)());return t.pipe(Object(j.a)(e=>({type:ke.SETUP,data:Ce(e)})),Object(h.a)(s.b)).subscribe(n.next.bind(n)),{tx$:n,rx$:r}}!function(e){e[e.SETUP=0]="SETUP",e[e.READY=1]="READY",e[e.QUERY=2]="QUERY",e[e.RESULT=3]="RESULT"}(ke||(ke={}));var Le,Re=c(36);function Pe(e,t){const c=t&Le.PARENT,a=t&Le.TEASER,n=Object.keys(e.terms).filter(t=>!e.terms[t]).map(e=>[we("del",null,e)," "]).flat().slice(0,-1),r=new URL(e.location);r.searchParams.append("h",Object.entries(e.terms).reduce((e,[t,c])=>`${e} ${c?t:""}`,"").replace(/%20/g,"+"));return we("a",{href:""+r,class:"md-search-result__link",tabIndex:-1},we("article",{class:["md-search-result__article",...c?["md-search-result__article--document"]:[]].join(" "),"data-md-score":e.score.toFixed(2)},c>0&&we("div",{class:"md-search-result__icon md-icon"}),we("h1",{class:"md-search-result__title"},e.title),a>0&&e.text.length>0&&we("p",{class:"md-search-result__teaser"},function(e,t){let c=t;if(e.length>c){for(;" "!==e[c]&&--c>0;);return e.substring(0,c)+"..."}return e}(e.text,320)),a>0&&n.length>0&&we("p",{class:"md-search-result__terms"},Se("search.result.term.missing"),": ",n)))}function ze(e,t=1/0){const c=[...e],a=c.findIndex(e=>!e.location.includes("#")),[n]=c.splice(a,1);let r=c.findIndex(e=>e.scorePe(e,Le.TEASER)),...i.length?[we("details",{class:"md-search-result__more"},we("summary",{tabIndex:-1},i.length>0&&1===i.length?Se("search.result.more.one"):Se("search.result.more.other",i.length)),i.map(e=>Pe(e,Le.TEASER)))]:[]];return we("li",{class:"md-search-result__item"},s)}function He(e){return we("ul",{class:"md-source__facts"},e.map(e=>we("li",{class:"md-source__fact"},e)))}function Ne({document$:e,dialog$:t}){if(!Re.isSupported())return b.a;e.subscribe(()=>{C("pre > code").forEach((e,t)=>{const c=e.parentElement;var a;c.id="__code_"+t,c.insertBefore((a=c.id,we("button",{class:"md-clipboard md-icon",title:Se("clipboard.copy"),"data-clipboard-target":`#${a} > code`})),e)})});const c=new X.a(e=>{new Re(".md-clipboard").on("success",t=>e.next(t))}).pipe(Object(D.a)());return c.pipe(Object(g.a)(e=>e.clearSelection()),Object(S.a)(Se("clipboard.copied"))).subscribe(t),c}!function(e){e[e.TEASER=1]="TEASER",e[e.PARENT=2]="PARENT"}(Le||(Le={}));var Ue=c(68),qe=c(77);function Ie(e,{document$:t,viewport$:c,location$:a}){"scrollRestoration"in history&&(history.scrollRestoration="manual"),Object(i.a)(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"});const r=E('link[rel="shortcut icon"]');void 0!==r&&(r.href=r.href);const s=Object(i.a)(document.body,"click").pipe(Object($.a)(e=>!(e.metaKey||e.ctrlKey)),Object(f.a)(t=>{if(t.target instanceof HTMLElement){const c=t.target.closest("a");if(c&&!c.target&&J(c)&&e.includes(c.href))return Y(c)||t.preventDefault(),Object(o.a)(c)}return b.a}),Object(j.a)(e=>({url:new URL(e.href)})),Object(D.a)());s.subscribe(()=>{Z("search",!1)});const u=s.pipe(Object($.a)(({url:e})=>!Y(e)),Object(D.a)()),l=Object(i.a)(window,"popstate").pipe(Object($.a)(e=>null!==e.state),Object(j.a)(e=>({url:new URL(location.href),offset:e.state})),Object(D.a)());Object(p.a)(u,l).pipe(Object(O.a)((e,t)=>e.url.href===t.url.href),Object(j.a)(({url:e})=>e)).subscribe(a);const 
d=a.pipe(Object(x.a)("pathname"),Object(Ue.a)(1),Object(f.a)(e=>Object(n.a)(fetch(e.href,{credentials:"same-origin"}).then(e=>e.text())).pipe(Object(m.a)(()=>(function(e){location.href=e.href}(e),b.a)))),Object(D.a)());u.pipe(Object(ge.a)(d)).subscribe(({url:e})=>{history.pushState({},"",e.toString())});const h=new DOMParser;d.pipe(Object(j.a)(e=>h.parseFromString(e,"text/html"))).subscribe(t);Object(p.a)(u,l).pipe(Object(ge.a)(t)).subscribe(({url:e,offset:t})=>{e.hash&&!t?Q(e.hash):ce(t||{y:0})}),t.pipe(Object(Ue.a)(1)).subscribe(({title:e,head:t})=>{document.title=e;for(const e of['link[rel="canonical"]','meta[name="author"]','meta[name="description"]']){const c=E(e,t),a=E(e,document.head);void 0!==c&&void 0!==a&&L(a,c)}document.dispatchEvent(new CustomEvent("DOMContentSwitch"))}),c.pipe(Object(qe.a)(250),Object(x.a)("offset")).subscribe(({offset:e})=>{history.replaceState(e,"")}),Object(p.a)(s,l).pipe(Object(w.a)(2,1),Object($.a)(([e,t])=>e.url.pathname===t.url.pathname&&!Y(t.url)),Object(j.a)(([,e])=>e)).subscribe(({offset:e})=>{ce(e||{y:0})})}function We(){const e=Object(i.a)(window,"keydown").pipe(Object($.a)(e=>!(e.metaKey||e.ctrlKey)),Object(j.a)(e=>({type:e.key,claim(){e.preventDefault(),e.stopPropagation()}})),Object(D.a)()).pipe(Object(j.a)(e=>{return Object.assign({mode:(t="search",G[t].checked?"search":"global")},e);var t}),Object($.a)(({mode:e})=>{if("global"===e){const e=A();if(void 0!==e)return!function(e){switch(e.tagName){case"INPUT":case"SELECT":case"TEXTAREA":return!0;default:return e.isContentEditable}}(e)}return!0}),Object(D.a)());return e.pipe(Object($.a)(({mode:e})=>"search"===e),Object(d.a)(ue("search-query"),ue("search-result"),ue("search-suggest"))).subscribe(([e,t,c,a])=>{const n=A();switch(e.type){case"Enter":if(n===t){const t=new Map;for(const e of C(":first-child [href]",c)){const c=e.firstElementChild;t.set(e,parseFloat(c.getAttribute("data-md-score")))}if(t.size){const[[e]]=[...t].sort(([,e],[,t])=>t-e);e.click()}e.claim()}break;case"Escape":case"Tab":Z("search",!1),P(t,!1);break;case"ArrowUp":case"ArrowDown":if(void 0===n)P(t);else{const a=[t,...C(":not(details) > [href], summary, details[open] [href]",c)],r=Math.max(0,(Math.max(0,a.indexOf(n))+a.length+("ArrowUp"===e.type?-1:1))%a.length);P(a[r])}e.claim();break;case"ArrowRight":a.innerText.length&&t.selectionStart===t.value.length&&(t.value=a.innerText);break;default:t!==A()&&P(t)}}),e.pipe(Object($.a)(({mode:e})=>"global"===e),Object(d.a)(ue("search-query"))).subscribe(([e,t])=>{switch(e.type){case"f":case"s":case"/":P(t),function(e){if(!(e instanceof HTMLInputElement))throw new Error("Not implemented");e.select()}(t),e.claim();break;case"p":case",":const c=E("[href][rel=prev]");void 0!==c&&c.click();break;case"n":case".":const a=E("[href][rel=next]");void 0!==a&&a.click()}}),e}function De(e){e.placeholder=Se("search.placeholder")}function Fe(e,{transform:t}={}){const c=t||$e,a=Object(p.a)(Object(i.a)(e,"keyup"),Object(i.a)(e,"focus").pipe(Object(v.a)(1))).pipe(Object(j.a)(()=>c(e.value)),Object(R.a)(c(e.value)),Object(O.a)()),n=function(e){return Object(p.a)(Object(i.a)(e,"focus"),Object(i.a)(e,"blur")).pipe(Object(j.a)(({type:e})=>"focus"===e),Object(R.a)(e===A()))}(e);return Object(r.a)([a,n]).pipe(Object(j.a)(([e,t])=>({value:e,focus:t})))}function Je({tx$:e},t={}){return Object(pe.a)(Object(f.a)(c=>{const a=Fe(c,t);return 
a.pipe(Object(x.a)("value"),Object(j.a)(({value:e})=>({type:ke.QUERY,data:e}))).subscribe(e.next.bind(e)),a.pipe(Object(x.a)("focus")).subscribe(({focus:e})=>{e&&Z("search",e)}),a.pipe(function(e){return Object(pe.a)(Object(g.a)(({focus:t})=>{t?function(e,t){e.placeholder=t}(e,""):De(e)}),Object(U.a)(()=>{De(e)}))}(c))}))}function Ye(){return Object(pe.a)(Object(f.a)(e=>function(e){return Object(i.a)(e,"click").pipe(Object(S.a)(void 0))}(e).pipe(Object(oe.a)(ue("search-query")),Object(g.a)(P),Object(S.a)(void 0))),Object(R.a)(void 0))}function Ke(e,t){e.appendChild(t)}function Be(e,{query$:t,fetch$:c}){const a=T(".md-search-result__list",e),n=T(".md-search-result__meta",e);return Object(pe.a)(Object(d.a)(t),Object(j.a)(([e,t])=>{const{items:c}=e;return t.value?function(e,t){switch(t){case 0:e.textContent=Se("search.result.none");break;case 1:e.textContent=Se("search.result.one");break;default:e.textContent=Se("search.result.other",t)}}(n,c.length):function(e){e.textContent=Se("search.result.placeholder")}(n),e}),Object(f.a)(t=>{const{items:n}=t,r=[...n.map(([e])=>e.score),0];return c.pipe(Object(h.a)(u.a),Object(se.a)(t=>{const c=e.parentElement;for(;t16)););return t},0),Object(S.a)(t),Object(U.a)(()=>{!function(e){e.innerHTML=""}(a)}))}))}function Qe({rx$:e},{query$:t}){return Object(pe.a)(Object(f.a)(c=>{const a=c.parentElement,n=function(e){return Object(p.a)(Object(i.a)(e,"scroll"),Object(i.a)(window,"resize")).pipe(Object(j.a)(()=>z(e)),Object(R.a)(z(e)))}(a).pipe(Object(j.a)(({y:e})=>e>=a.scrollHeight-a.offsetHeight-16),Object(O.a)(),Object($.a)(Boolean));return e.pipe(Object($.a)(Ae),Object(j.a)(({data:e})=>e),Be(c,{query$:t,fetch$:n}))}))}function Xe({header$:e,viewport$:t,screen$:c}){return Object(pe.a)(Object(f.a)(a=>c.pipe(Object(f.a)(c=>c?ne(a,{header$:e,viewport$:t}).pipe(Object(j.a)(({offset:{y:e}})=>({hidden:e>=10})),Object(x.a)("hidden"),function(e){return Object(pe.a)(Object(h.a)(u.a),Object(g.a)(({hidden:t})=>{!function(e,t){e.setAttribute("data-md-state",t?"hidden":"")}(e,t)}),Object(U.a)(()=>{!function(e){e.removeAttribute("data-md-state")}(e)}))}(a)):Object(o.a)({hidden:!0})))))}function Ve(e){e.removeAttribute("data-md-state")}function Ge(e){e.classList.remove("md-nav__link--active")}function Ze({header$:e,main$:t,viewport$:c,tablet$:a}){return Object(pe.a)(Object(f.a)(n=>a.pipe(Object(f.a)(a=>{if(a){const a=C(".md-nav__link",n),o=Oe(n,{main$:t,viewport$:c}).pipe(me(n,{header$:e})),i=function(e,{header$:t,viewport$:c}){const a=new Map;for(const t of e){const e=E(`[id="${decodeURIComponent(t.hash.substring(1))}"]`);void 0!==e&&a.set(t,e)}const n=t.pipe(Object(j.a)(e=>18+e.height));return W(document.body).pipe(Object(x.a)("height"),Object(j.a)(()=>{let e=[];return[...a].reduce((t,[c,n])=>{for(;e.length;){if(!(a.get(e[e.length-1]).tagName>=n.tagName))break;e.pop()}let r=n.offsetTop;for(;!r&&n.parentElement;)r=(n=n.parentElement).offsetTop;return t.set([...e=[...e,c]].reverse(),r)},new Map)}),Object(f.a)(e=>Object(r.a)([n,c]).pipe(Object(se.a)(([e,t],[c,{offset:{y:a}}])=>{for(;t.length;){const[,n]=t[0];if(!(n-c=a))break;t=[e.pop(),...t]}return[e,t]},[[],[...e]]),Object(O.a)((e,t)=>e[0]===t[0]&&e[1]===t[1])))).pipe(Object(j.a)(([e,t])=>({prev:e.map(([e])=>e),next:t.map(([e])=>e)})),Object(R.a)({prev:[],next:[]}),Object(w.a)(2,1),Object(j.a)(([e,t])=>e.prev.length{for(const[e]of 
t)Ge(e),Ve(e);e.forEach(([t],c)=>{!function(e,t){e.classList.toggle("md-nav__link--active",t)}(t,c===e.length-1),function(e,t){e.setAttribute("data-md-state",t?"blur":"")}(t,!0)})}),Object(U.a)(()=>{for(const t of e)Ge(t),Ve(t)}))}(a));return Object(r.a)([o,i]).pipe(Object(j.a)(([e,t])=>({sidebar:e,anchors:t})))}return Object(o.a)({})}))))}var et=c(71);var tt=c(72);function ct(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function at(e){const[t]=e.match(/(git(?:hub|lab))/i)||[];switch(t.toLowerCase()){case"github":const[,t,c]=e.match(/^.+github\.com\/([^\/]+)\/?([^\/]+)?/i);return function(e,t){const c=void 0!==t?`https://api.github.com/repos/${e}/${t}`:"https://api.github.com/users/"+e;return Object(n.a)(fetch(c).then(e=>e.json())).pipe(Object(j.a)(e=>{if(void 0!==t){const{stargazers_count:t,forks_count:c}=e;return[_e(t||0)+" Stars",_e(c||0)+" Forks"]}{const{public_repos:t}=e;return[_e(t||0)+" Repositories"]}}))}(t,c);case"gitlab":const[,a,r]=e.match(/^.+?([^\/]*gitlab[^\/]+)\/(.+?)\/?$/i);return function(e,t){const c=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Object(n.a)(fetch(c).then(e=>e.json())).pipe(Object(j.a)(({star_count:e,forks_count:t})=>[_e(e)+" Stars",_e(t)+" Forks"]))}(a,r);default:return b.a}}function nt({document$:e}){e.pipe(Object(j.a)(()=>T(".md-source[href]")),Object(f.a)(({href:e})=>{return t=""+function(e){let t=0;for(let c=0,a=e.length;cat(e),Object(a.a)(()=>{const e=sessionStorage.getItem(t);if(e)return Object(o.a)(JSON.parse(e));{const e=c();return e.subscribe(e=>{try{sessionStorage.setItem(t,JSON.stringify(e))}catch(e){}}),e}});var t,c}),Object(m.a)(()=>b.a)).subscribe(e=>{for(const t of C(".md-source__repository"))t.hasAttribute("data-md-state")||(t.setAttribute("data-md-state","done"),t.appendChild(He(e)))})}function rt(e,t){e.setAttribute("data-md-state","lock"),e.style.top=`-${t}px`}function ot(e){const t=-1*parseInt(e.style.top,10);e.removeAttribute("data-md-state"),e.style.top="",t&&window.scrollTo(0,t)}function it(e){if(!function(e){return"object"==typeof e&&"string"==typeof e.base&&"object"==typeof e.features&&"object"==typeof e.search}(e))throw new SyntaxError("Invalid configuration: "+JSON.stringify(e));const t=function(){const e=new k.a;return Object(i.a)(document,"DOMContentLoaded").pipe(Object(S.a)(document)).subscribe(e),e}(),c=K(),T=function(e,{location$:t}){return t.pipe(Object(y.a)(1),Object(j.a)(({href:t})=>new URL(e,t).toString().replace(/\/$/,"")),Object(l.a)({bufferSize:1,refCount:!0}))}(e.base,{location$:c}),A=Object(i.a)(window,"hashchange").pipe(Object(j.a)(B),Object(R.a)(B()),Object($.a)(e=>e.length>0),Object(D.a)()),P=Object(r.a)([Object(p.a)(Object(i.a)(window,"scroll",{passive:!0}),Object(i.a)(window,"resize",{passive:!0})).pipe(Object(j.a)(te),Object(R.a)(te())),Object(i.a)(window,"resize",{passive:!0}).pipe(Object(j.a)(ae),Object(R.a)(ae()))]).pipe(Object(j.a)(([e,t])=>({offset:e,size:t})),Object(l.a)({bufferSize:1,refCount:!0})),z=V("(min-width: 960px)"),H=V("(min-width: 1220px)");!function(e,{document$:t}){be=t.pipe(Object(j.a)(t=>e.reduce((e,c)=>{const a=E(`[data-md-component=${c}]`,t);return Object.assign(Object.assign({},e),void 0!==a?{[c]:a}:{})},{})),Object(se.a)((t,c)=>{for(const a of e)switch(a){case"announce":case"header-topic":case"container":case"skip":a in t&&void 0!==t[a]&&(L(t[a],c[a]),t[a]=c[a]);break;default:void 0!==c[a]?t[a]=E(`[data-md-component=${a}]`):delete t[a]}return 
t}),Object(l.a)({bufferSize:1,refCount:!0}))}(["announce","container","header","header-title","header-topic","main","navigation","search","search-query","search-reset","search-result","search-suggest","skip","tabs","toc"],{document$:t});const U=We();matchMedia("(hover)").matches&&function({document$:e,viewport$:t}){const c=e.pipe(Object(j.a)(()=>C("pre > code"))),a=t.pipe(Object(x.a)("size"));Object(r.a)([c,a]).subscribe(([e])=>{for(const t of e)t.scrollWidth>t.clientWidth?t.setAttribute("tabindex","0"):t.removeAttribute("tabindex")})}({document$:t,viewport$:P}),function({document$:e,hash$:t}){const c=e.pipe(Object(j.a)(()=>C("details")));Object(p.a)(V("print").pipe(Object($.a)(Boolean)),Object(i.a)(window,"beforeprint")).pipe(Object(oe.a)(c)).subscribe(e=>{for(const t of e)t.setAttribute("open","")}),t.pipe(Object(j.a)(e=>E(`[id="${e}"]`)),Object($.a)(e=>void 0!==e),Object(g.a)(e=>{const t=e.closest("details");t&&!t.open&&t.setAttribute("open","")})).subscribe(e=>e.scrollIntoView())}({document$:t,hash$:A}),function({document$:e}){e.pipe(Object(Ue.a)(1),Object(d.a)(ue("container")),Object(j.a)(([,e])=>C("script",e))).pipe(Object(f.a)(e=>Object(o.a)(...e)),Object(et.a)(e=>{const t=M("script");return e.src?(t.src=e.src,L(e,t),new X.a(e=>{t.onload=()=>e.complete()})):(t.textContent=e.textContent,L(e,t),ie.a)})).subscribe(de.a)}({document$:t}),nt({document$:t}),function({document$:e}){const t=M("table");e.pipe(Object(j.a)(()=>C("table:not([class])"))).subscribe(e=>{for(const c of e)L(c,t),L(t,we("div",{class:"md-typeset__scrollwrap"},we("div",{class:"md-typeset__table"},c)))})}({document$:t}),function({document$:e}){const t=e.pipe(Object(j.a)(()=>C("[data-md-scrollfix]")),Object(l.a)({bufferSize:1,refCount:!0}));t.subscribe(e=>{for(const t of e)t.removeAttribute("data-md-scrollfix")}),Object(tt.a)(ct,t,b.a).pipe(Object(f.a)(e=>Object(p.a)(...e.map(e=>Object(i.a)(e,"touchstart").pipe(Object(S.a)(e)))))).subscribe(e=>{const t=e.scrollTop;0===t?e.scrollTop=1:t+e.offsetHeight===e.scrollHeight&&(e.scrollTop=t-1)})}({document$:t});const q=function({duration:e}={}){const t=new N.a,c=M("div");return c.classList.add("md-dialog","md-typeset"),t.pipe(Object(f.a)(t=>Object(o.a)(document.body).pipe(Object(j.a)(e=>e.appendChild(c)),Object(h.a)(u.a),Object(v.a)(1),Object(g.a)(e=>{e.innerHTML=t,e.setAttribute("data-md-state","open")}),Object(v.a)(e||2e3),Object(g.a)(e=>e.removeAttribute("data-md-state")),Object(v.a)(400),Object(g.a)(e=>{e.innerHTML="",e.remove()})))).subscribe(de.a),t}(),I=Ne({document$:t,dialog$:q}),W=ue("header").pipe(fe({document$:t,viewport$:P}),Object(l.a)({bufferSize:1,refCount:!0})),F=ue("main").pipe(he({header$:W,viewport$:P}),Object(l.a)({bufferSize:1,refCount:!0})),Y=ue("navigation").pipe(function({header$:e,main$:t,viewport$:c,screen$:a}){return Object(pe.a)(Object(f.a)(n=>a.pipe(Object(f.a)(a=>a?Oe(n,{main$:t,viewport$:c}).pipe(me(n,{header$:e}),Object(j.a)(e=>({sidebar:e}))):Object(o.a)({})))))}({header$:W,main$:F,viewport$:P,screen$:H}),Object(l.a)({bufferSize:1,refCount:!0})),G=ue("toc").pipe(Ze({header$:W,main$:F,viewport$:P,tablet$:z}),Object(l.a)({bufferSize:1,refCount:!0})),ce=ue("tabs").pipe(Xe({header$:W,viewport$:P,screen$:H}),Object(l.a)({bufferSize:1,refCount:!0})),ne=ue("search").pipe(Object(f.a)(()=>Object(a.a)(()=>{const t=e.search&&e.search.index?e.search.index:void 0,a=void 0!==t?Object(n.a)(t):T.pipe(Object(f.a)(e=>fetch(e+"/search/search_index.json",{credentials:"same-origin"}).then(e=>e.json())));return 
e.features.includes("search.highlight")&&Object(r.a)([c,a]).subscribe(([e,t])=>{if(!e.searchParams.has("h"))return;const c=ve(t.config)(e.searchParams.get("h"));let a=e.hash?E(`[id="${e.hash.slice(1)}"]`):E("article");if(void 0!==a)for(;a;){const e=document.createNodeIterator(a,NodeFilter.SHOW_TEXT),t=[];for(;;){const c=e.nextNode();if(!c)break;t.push(c)}for(const e of t)e.textContent.trim()&&e.replaceWith(we("span",null,c(e.textContent)));if("article"===a.tagName)break;{const e=a.nextSibling;if(e instanceof HTMLElement&&e.tagName.match(/^H[1-6]/))break;a=e}}}),Object(o.a)(Me(e.search.worker,{base$:T,index$:a}))}))).pipe(Object(f.a)(t=>{const c=ue("search-query").pipe(Je(t,{transform:e.search.transform}),Object(l.a)({bufferSize:1,refCount:!0})),a=ue("search-reset").pipe(Ye(),Object(l.a)({bufferSize:1,refCount:!0})),n=ue("search-result").pipe(Qe(t,{query$:c}),Object(l.a)({bufferSize:1,refCount:!0}));return e.features.includes("search.suggest")&&(n.pipe(Object(d.a)(c)).subscribe(([{suggestions:e},t])=>{if(void 0!==e){const c=document.querySelector(".md-search__suggest"),a=t.value.split(/([\s-]+)/);if(e.length){const[n]=e.slice(-1);if(e.length>=t.value.split(/[\s-]+/).length&&n.startsWith(a[a.length-1])){const e=document.createElement("span");e.innerHTML=[...a.slice(0,-1),n].join(""),c.innerHTML="",c.appendChild(e)}else c.innerHTML=""}else c.innerHTML=""}}),ue("search-query").pipe(Object(f.a)(e=>Object(i.a)(e,"keydown").pipe(Object(h.a)(s.b),Object(j.a)(()=>e.value),Object(O.a)(),Object(j.a)(()=>{const t=document.querySelector(".md-search__suggest span");t&&(t.innerHTML.startsWith(e.value)&&!e.value.endsWith(" ")&&0!==e.value.length||(t.innerHTML=""))})))).subscribe()),ue("search").pipe(function({rx$:e,tx$:t},{query$:c,reset$:a,result$:n}){return Object(pe.a)(Object(f.a)(()=>{const o=e.pipe(Object($.a)(Ee),Object(S.a)("ready"),Object(R.a)("waiting"));return t.pipe(Object($.a)(Te),Object(ge.a)(o),Object(y.a)(1)).subscribe(t.next.bind(t)),Object(r.a)([o,c,n,a]).pipe(Object(j.a)(([e,t,c])=>({status:e,query:t,result:c})))}))}(t,{query$:c,reset$:a,result$:n}))}),Object(m.a)(()=>(ue("search").subscribe(e=>e.hidden=!0),b.a)),Object(l.a)({bufferSize:1,refCount:!0}));if(A.pipe(Object(g.a)(()=>Z("search",!1)),Object(v.a)(125)).subscribe(e=>Q("#"+e)),Object(r.a)([ee("search"),z]).pipe(Object(d.a)(P),Object(f.a)(([[e,c],{offset:{y:a}}])=>{const n=e&&!c;return t.pipe(Object(v.a)(n?400:100),Object(h.a)(u.a),Object(g.a)(({body:e})=>n?rt(e,a):ot(e)))})).subscribe(),Object(i.a)(document.body,"click").pipe(Object($.a)(e=>!(e.metaKey||e.ctrlKey)),Object($.a)(e=>{if(e.target instanceof HTMLElement){const t=e.target.closest("a");if(t&&J(t))return!0}return!1})).subscribe(()=>{Z("drawer",!1)}),e.features.includes("navigation.instant")&&"file:"!==location.protocol){const e=new DOMParser;T.pipe(Object(f.a)(t=>Object(n.a)(fetch(t+"/sitemap.xml").then(e=>e.text()).then(t=>e.parseFromString(t,"text/xml")))),Object(d.a)(T),Object(j.a)(([e,t])=>{const c=C("loc",e).map(e=>e.textContent);if(c.length>1){const[e,a]=c.sort((e,t)=>e.length-t.length);let n=0;if(e===a)n=e.length;else for(;e.charAt(n)===a.charAt(n);)n++;for(let a=0;a{Ie(e,{document$:t,location$:c,viewport$:P})})}U.pipe(Object($.a)(e=>"global"===e.mode&&"Tab"===e.type),Object(y.a)(1)).subscribe(()=>{for(const e of C(".headerlink"))e.style.visibility="visible"}),t.subscribe(()=>{const e=C("[data-md-state=indeterminate]");for(const t of e)t.dataset.mdState="",t.indeterminate=!0,t.checked=!1});const re=C("[data-md-option=palette]");for(let e=0;e{for(const e 
of["mdColorScheme","mdColorPrimary","mdColorAccent"])t.dataset[e]&&(document.body.dataset[e]=t.dataset[e]);re[e].dataset.mdState="hidden";const c=re[(e+1)%re.length];c.dataset.mdState="",c.focus(),localStorage.setItem("__palette",JSON.stringify({index:e,color:{scheme:t.dataset.mdColorScheme,primary:t.dataset.mdColorPrimary,accent:t.dataset.mdColorAccent}}))})}if(re.length){const{index:e}=JSON.parse(localStorage.getItem("__palette")||'{ "index": 0 }');re[(+e+1)%re.length].dataset.mdState=""}if(e.features.includes("header.autohide")&&P.pipe(Object(j.a)(({offset:e})=>e.y),Object(w.a)(2,1),Object(j.a)(([e,t])=>[eP.pipe(Object(j.a)(({offset:e})=>e.y),Object($.a)(e=>e>400),Object(j.a)(e=>Math.abs(t-e)),Object($.a)(e=>e>100),Object(S.a)(e),Object(y.a)(1)))).subscribe(e=>{const t=E("[data-md-component=header]");null==t||t.setAttribute("data-md-state",e?"hidden":"shadow")}),void 0!==e.version&&"mike"===e.version.method){const e=T.pipe(Object(f.a)(e=>fetch(e+"/../versions.json",{credentials:"same-origin"}).then(e=>e.json())),Object(m.a)(()=>(console.log("Couldn't load versions.json"),b.a)));ue("header-title").pipe(Object(j.a)(e=>e.querySelector(".md-header-nav__topic")),Object(_.a)(T,e)).subscribe(([e,t,c])=>{e.appendChild(function(e,t){const[,c]=e.match(/([^\/]+)\/?$/);return we("div",{class:"md-version"},we("span",{class:"md-version__current"},(t.find(({version:e})=>e===c)||t[0]).version),we("ul",{class:"md-version__list"},t.map(t=>we("li",{class:"md-version__item"},we("a",{class:"md-version__link",href:`${e}/../${t.version}/`},t.title)))))}(t,c))})}const le={document$:t,location$:c,viewport$:P,header$:W,main$:F,navigation$:Y,search$:ne,tabs$:ce,toc$:G,clipboard$:I,keyboard$:U,dialog$:q};return Object(p.a)(...Object.values(le)).subscribe(),le}document.documentElement.classList.remove("no-js"),document.documentElement.classList.add("js"),navigator.userAgent.match(/(iPad|iPhone|iPod)/g)&&document.documentElement.classList.add("ios")}})); \ No newline at end of file diff --git a/0.2/assets/javascripts/lunr/tinyseg.js b/0.2/assets/javascripts/lunr/tinyseg.js new file mode 100644 index 00000000..167fa6dd --- /dev/null +++ b/0.2/assets/javascripts/lunr/tinyseg.js @@ -0,0 +1,206 @@ +/** + * export the module via AMD, CommonJS or as a browser global + * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js + */ +;(function (root, factory) { + if (typeof define === 'function' && define.amd) { + // AMD. Register as an anonymous module. + define(factory) + } else if (typeof exports === 'object') { + /** + * Node. Does not work with strict CommonJS, but + * only CommonJS-like environments that support module.exports, + * like Node. + */ + module.exports = factory() + } else { + // Browser globals (root is window) + factory()(root.lunr); + } +}(this, function () { + /** + * Just return a value to define the module export. + * This example returns an object, but the module + * can return a function as the exported value. + */ + + return function(lunr) { + // TinySegmenter 0.1 -- Super compact Japanese tokenizer in Javascript + // (c) 2008 Taku Kudo + // TinySegmenter is freely distributable under the terms of a new BSD licence. 
+ // For details, see http://chasen.org/~taku/software/TinySegmenter/LICENCE.txt + + function TinySegmenter() { + var patterns = { + "[一二三四五六七八九十百千万億兆]":"M", + "[一-龠々〆ヵヶ]":"H", + "[ぁ-ん]":"I", + "[ァ-ヴーア-ン゙ー]":"K", + "[a-zA-Za-zA-Z]":"A", + "[0-90-9]":"N" + } + this.chartype_ = []; + for (var i in patterns) { + var regexp = new RegExp(i); + this.chartype_.push([regexp, patterns[i]]); + } + + this.BIAS__ = -332 + this.BC1__ = {"HH":6,"II":2461,"KH":406,"OH":-1378}; + this.BC2__ = {"AA":-3267,"AI":2744,"AN":-878,"HH":-4070,"HM":-1711,"HN":4012,"HO":3761,"IA":1327,"IH":-1184,"II":-1332,"IK":1721,"IO":5492,"KI":3831,"KK":-8741,"MH":-3132,"MK":3334,"OO":-2920}; + this.BC3__ = {"HH":996,"HI":626,"HK":-721,"HN":-1307,"HO":-836,"IH":-301,"KK":2762,"MK":1079,"MM":4034,"OA":-1652,"OH":266}; + this.BP1__ = {"BB":295,"OB":304,"OO":-125,"UB":352}; + this.BP2__ = {"BO":60,"OO":-1762}; + this.BQ1__ = {"BHH":1150,"BHM":1521,"BII":-1158,"BIM":886,"BMH":1208,"BNH":449,"BOH":-91,"BOO":-2597,"OHI":451,"OIH":-296,"OKA":1851,"OKH":-1020,"OKK":904,"OOO":2965}; + this.BQ2__ = {"BHH":118,"BHI":-1159,"BHM":466,"BIH":-919,"BKK":-1720,"BKO":864,"OHH":-1139,"OHM":-181,"OIH":153,"UHI":-1146}; + this.BQ3__ = {"BHH":-792,"BHI":2664,"BII":-299,"BKI":419,"BMH":937,"BMM":8335,"BNN":998,"BOH":775,"OHH":2174,"OHM":439,"OII":280,"OKH":1798,"OKI":-793,"OKO":-2242,"OMH":-2402,"OOO":11699}; + this.BQ4__ = {"BHH":-3895,"BIH":3761,"BII":-4654,"BIK":1348,"BKK":-1806,"BMI":-3385,"BOO":-12396,"OAH":926,"OHH":266,"OHK":-2036,"ONN":-973}; + this.BW1__ = {",と":660,",同":727,"B1あ":1404,"B1同":542,"、と":660,"、同":727,"」と":1682,"あっ":1505,"いう":1743,"いっ":-2055,"いる":672,"うし":-4817,"うん":665,"から":3472,"がら":600,"こう":-790,"こと":2083,"こん":-1262,"さら":-4143,"さん":4573,"した":2641,"して":1104,"すで":-3399,"そこ":1977,"それ":-871,"たち":1122,"ため":601,"った":3463,"つい":-802,"てい":805,"てき":1249,"でき":1127,"です":3445,"では":844,"とい":-4915,"とみ":1922,"どこ":3887,"ない":5713,"なっ":3015,"など":7379,"なん":-1113,"にし":2468,"には":1498,"にも":1671,"に対":-912,"の一":-501,"の中":741,"ませ":2448,"まで":1711,"まま":2600,"まる":-2155,"やむ":-1947,"よっ":-2565,"れた":2369,"れで":-913,"をし":1860,"を見":731,"亡く":-1886,"京都":2558,"取り":-2784,"大き":-2604,"大阪":1497,"平方":-2314,"引き":-1336,"日本":-195,"本当":-2423,"毎日":-2113,"目指":-724,"B1あ":1404,"B1同":542,"」と":1682}; + this.BW2__ = {"..":-11822,"11":-669,"――":-5730,"−−":-13175,"いう":-1609,"うか":2490,"かし":-1350,"かも":-602,"から":-7194,"かれ":4612,"がい":853,"がら":-3198,"きた":1941,"くな":-1597,"こと":-8392,"この":-4193,"させ":4533,"され":13168,"さん":-3977,"しい":-1819,"しか":-545,"した":5078,"して":972,"しな":939,"その":-3744,"たい":-1253,"たた":-662,"ただ":-3857,"たち":-786,"たと":1224,"たは":-939,"った":4589,"って":1647,"っと":-2094,"てい":6144,"てき":3640,"てく":2551,"ては":-3110,"ても":-3065,"でい":2666,"でき":-1528,"でし":-3828,"です":-4761,"でも":-4203,"とい":1890,"とこ":-1746,"とと":-2279,"との":720,"とみ":5168,"とも":-3941,"ない":-2488,"なが":-1313,"など":-6509,"なの":2614,"なん":3099,"にお":-1615,"にし":2748,"にな":2454,"によ":-7236,"に対":-14943,"に従":-4688,"に関":-11388,"のか":2093,"ので":-7059,"のに":-6041,"のの":-6125,"はい":1073,"はが":-1033,"はず":-2532,"ばれ":1813,"まし":-1316,"まで":-6621,"まれ":5409,"めて":-3153,"もい":2230,"もの":-10713,"らか":-944,"らし":-1611,"らに":-1897,"りし":651,"りま":1620,"れた":4270,"れて":849,"れば":4114,"ろう":6067,"われ":7901,"を通":-11877,"んだ":728,"んな":-4115,"一人":602,"一方":-1375,"一日":970,"一部":-1051,"上が":-4479,"会社":-1116,"出て":2163,"分の":-7758,"同党":970,"同日":-913,"大阪":-2471,"委員":-1250,"少な":-1050,"年度":-8669,"年間":-1626,"府県":-2363,"手権":-1982,"新聞":-4066,"日新":-722,"日本":-7068,"日米":3372,"曜日":-601,"朝鮮":-2355,"本人":-2697,"東京":-1543,"然と":-1384,"社会":-1276,"立て":-990,"第に":-1612,"米国":-4268,"11":-669}; + this.BW3__ = 
{"あた":-2194,"あり":719,"ある":3846,"い.":-1185,"い。":-1185,"いい":5308,"いえ":2079,"いく":3029,"いた":2056,"いっ":1883,"いる":5600,"いわ":1527,"うち":1117,"うと":4798,"えと":1454,"か.":2857,"か。":2857,"かけ":-743,"かっ":-4098,"かに":-669,"から":6520,"かり":-2670,"が,":1816,"が、":1816,"がき":-4855,"がけ":-1127,"がっ":-913,"がら":-4977,"がり":-2064,"きた":1645,"けど":1374,"こと":7397,"この":1542,"ころ":-2757,"さい":-714,"さを":976,"し,":1557,"し、":1557,"しい":-3714,"した":3562,"して":1449,"しな":2608,"しま":1200,"す.":-1310,"す。":-1310,"する":6521,"ず,":3426,"ず、":3426,"ずに":841,"そう":428,"た.":8875,"た。":8875,"たい":-594,"たの":812,"たり":-1183,"たる":-853,"だ.":4098,"だ。":4098,"だっ":1004,"った":-4748,"って":300,"てい":6240,"てお":855,"ても":302,"です":1437,"でに":-1482,"では":2295,"とう":-1387,"とし":2266,"との":541,"とも":-3543,"どう":4664,"ない":1796,"なく":-903,"など":2135,"に,":-1021,"に、":-1021,"にし":1771,"にな":1906,"には":2644,"の,":-724,"の、":-724,"の子":-1000,"は,":1337,"は、":1337,"べき":2181,"まし":1113,"ます":6943,"まっ":-1549,"まで":6154,"まれ":-793,"らし":1479,"られ":6820,"るる":3818,"れ,":854,"れ、":854,"れた":1850,"れて":1375,"れば":-3246,"れる":1091,"われ":-605,"んだ":606,"んで":798,"カ月":990,"会議":860,"入り":1232,"大会":2217,"始め":1681,"市":965,"新聞":-5055,"日,":974,"日、":974,"社会":2024,"カ月":990}; + this.TC1__ = {"AAA":1093,"HHH":1029,"HHM":580,"HII":998,"HOH":-390,"HOM":-331,"IHI":1169,"IOH":-142,"IOI":-1015,"IOM":467,"MMH":187,"OOI":-1832}; + this.TC2__ = {"HHO":2088,"HII":-1023,"HMM":-1154,"IHI":-1965,"KKH":703,"OII":-2649}; + this.TC3__ = {"AAA":-294,"HHH":346,"HHI":-341,"HII":-1088,"HIK":731,"HOH":-1486,"IHH":128,"IHI":-3041,"IHO":-1935,"IIH":-825,"IIM":-1035,"IOI":-542,"KHH":-1216,"KKA":491,"KKH":-1217,"KOK":-1009,"MHH":-2694,"MHM":-457,"MHO":123,"MMH":-471,"NNH":-1689,"NNO":662,"OHO":-3393}; + this.TC4__ = {"HHH":-203,"HHI":1344,"HHK":365,"HHM":-122,"HHN":182,"HHO":669,"HIH":804,"HII":679,"HOH":446,"IHH":695,"IHO":-2324,"IIH":321,"III":1497,"IIO":656,"IOO":54,"KAK":4845,"KKA":3386,"KKK":3065,"MHH":-405,"MHI":201,"MMH":-241,"MMM":661,"MOM":841}; + this.TQ1__ = {"BHHH":-227,"BHHI":316,"BHIH":-132,"BIHH":60,"BIII":1595,"BNHH":-744,"BOHH":225,"BOOO":-908,"OAKK":482,"OHHH":281,"OHIH":249,"OIHI":200,"OIIH":-68}; + this.TQ2__ = {"BIHH":-1401,"BIII":-1033,"BKAK":-543,"BOOO":-5591}; + this.TQ3__ = {"BHHH":478,"BHHM":-1073,"BHIH":222,"BHII":-504,"BIIH":-116,"BIII":-105,"BMHI":-863,"BMHM":-464,"BOMH":620,"OHHH":346,"OHHI":1729,"OHII":997,"OHMH":481,"OIHH":623,"OIIH":1344,"OKAK":2792,"OKHH":587,"OKKA":679,"OOHH":110,"OOII":-685}; + this.TQ4__ = {"BHHH":-721,"BHHM":-3604,"BHII":-966,"BIIH":-607,"BIII":-2181,"OAAA":-2763,"OAKK":180,"OHHH":-294,"OHHI":2446,"OHHO":480,"OHIH":-1573,"OIHH":1935,"OIHI":-493,"OIIH":626,"OIII":-4007,"OKAK":-8156}; + this.TW1__ = {"につい":-4681,"東京都":2026}; + this.TW2__ = {"ある程":-2049,"いった":-1256,"ころが":-2434,"しょう":3873,"その後":-4430,"だって":-1049,"ていた":1833,"として":-4657,"ともに":-4517,"もので":1882,"一気に":-792,"初めて":-1512,"同時に":-8097,"大きな":-1255,"対して":-2721,"社会党":-3216}; + this.TW3__ = {"いただ":-1734,"してい":1314,"として":-4314,"につい":-5483,"にとっ":-5989,"に当た":-6247,"ので,":-727,"ので、":-727,"のもの":-600,"れから":-3752,"十二月":-2287}; + this.TW4__ = {"いう.":8576,"いう。":8576,"からな":-2348,"してい":2958,"たが,":1516,"たが、":1516,"ている":1538,"という":1349,"ました":5543,"ません":1097,"ようと":-4258,"よると":5865}; + this.UC1__ = {"A":484,"K":93,"M":645,"O":-505}; + this.UC2__ = {"A":819,"H":1059,"I":409,"M":3987,"N":5775,"O":646}; + this.UC3__ = {"A":-1370,"I":2311}; + this.UC4__ = {"A":-2643,"H":1809,"I":-1032,"K":-3450,"M":3565,"N":3876,"O":6646}; + this.UC5__ = {"H":313,"I":-1238,"K":-799,"M":539,"O":-831}; + this.UC6__ = {"H":-506,"I":-253,"K":87,"M":247,"O":-387}; + this.UP1__ = {"O":-214}; + 
this.UP2__ = {"B":69,"O":935}; + this.UP3__ = {"B":189}; + this.UQ1__ = {"BH":21,"BI":-12,"BK":-99,"BN":142,"BO":-56,"OH":-95,"OI":477,"OK":410,"OO":-2422}; + this.UQ2__ = {"BH":216,"BI":113,"OK":1759}; + this.UQ3__ = {"BA":-479,"BH":42,"BI":1913,"BK":-7198,"BM":3160,"BN":6427,"BO":14761,"OI":-827,"ON":-3212}; + this.UW1__ = {",":156,"、":156,"「":-463,"あ":-941,"う":-127,"が":-553,"き":121,"こ":505,"で":-201,"と":-547,"ど":-123,"に":-789,"の":-185,"は":-847,"も":-466,"や":-470,"よ":182,"ら":-292,"り":208,"れ":169,"を":-446,"ん":-137,"・":-135,"主":-402,"京":-268,"区":-912,"午":871,"国":-460,"大":561,"委":729,"市":-411,"日":-141,"理":361,"生":-408,"県":-386,"都":-718,"「":-463,"・":-135}; + this.UW2__ = {",":-829,"、":-829,"〇":892,"「":-645,"」":3145,"あ":-538,"い":505,"う":134,"お":-502,"か":1454,"が":-856,"く":-412,"こ":1141,"さ":878,"ざ":540,"し":1529,"す":-675,"せ":300,"そ":-1011,"た":188,"だ":1837,"つ":-949,"て":-291,"で":-268,"と":-981,"ど":1273,"な":1063,"に":-1764,"の":130,"は":-409,"ひ":-1273,"べ":1261,"ま":600,"も":-1263,"や":-402,"よ":1639,"り":-579,"る":-694,"れ":571,"を":-2516,"ん":2095,"ア":-587,"カ":306,"キ":568,"ッ":831,"三":-758,"不":-2150,"世":-302,"中":-968,"主":-861,"事":492,"人":-123,"会":978,"保":362,"入":548,"初":-3025,"副":-1566,"北":-3414,"区":-422,"大":-1769,"天":-865,"太":-483,"子":-1519,"学":760,"実":1023,"小":-2009,"市":-813,"年":-1060,"強":1067,"手":-1519,"揺":-1033,"政":1522,"文":-1355,"新":-1682,"日":-1815,"明":-1462,"最":-630,"朝":-1843,"本":-1650,"東":-931,"果":-665,"次":-2378,"民":-180,"気":-1740,"理":752,"発":529,"目":-1584,"相":-242,"県":-1165,"立":-763,"第":810,"米":509,"自":-1353,"行":838,"西":-744,"見":-3874,"調":1010,"議":1198,"込":3041,"開":1758,"間":-1257,"「":-645,"」":3145,"ッ":831,"ア":-587,"カ":306,"キ":568}; + this.UW3__ = {",":4889,"1":-800,"−":-1723,"、":4889,"々":-2311,"〇":5827,"」":2670,"〓":-3573,"あ":-2696,"い":1006,"う":2342,"え":1983,"お":-4864,"か":-1163,"が":3271,"く":1004,"け":388,"げ":401,"こ":-3552,"ご":-3116,"さ":-1058,"し":-395,"す":584,"せ":3685,"そ":-5228,"た":842,"ち":-521,"っ":-1444,"つ":-1081,"て":6167,"で":2318,"と":1691,"ど":-899,"な":-2788,"に":2745,"の":4056,"は":4555,"ひ":-2171,"ふ":-1798,"へ":1199,"ほ":-5516,"ま":-4384,"み":-120,"め":1205,"も":2323,"や":-788,"よ":-202,"ら":727,"り":649,"る":5905,"れ":2773,"わ":-1207,"を":6620,"ん":-518,"ア":551,"グ":1319,"ス":874,"ッ":-1350,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278,"・":-3794,"一":-1619,"下":-1759,"世":-2087,"両":3815,"中":653,"主":-758,"予":-1193,"二":974,"人":2742,"今":792,"他":1889,"以":-1368,"低":811,"何":4265,"作":-361,"保":-2439,"元":4858,"党":3593,"全":1574,"公":-3030,"六":755,"共":-1880,"円":5807,"再":3095,"分":457,"初":2475,"別":1129,"前":2286,"副":4437,"力":365,"動":-949,"務":-1872,"化":1327,"北":-1038,"区":4646,"千":-2309,"午":-783,"協":-1006,"口":483,"右":1233,"各":3588,"合":-241,"同":3906,"和":-837,"員":4513,"国":642,"型":1389,"場":1219,"外":-241,"妻":2016,"学":-1356,"安":-423,"実":-1008,"家":1078,"小":-513,"少":-3102,"州":1155,"市":3197,"平":-1804,"年":2416,"広":-1030,"府":1605,"度":1452,"建":-2352,"当":-3885,"得":1905,"思":-1291,"性":1822,"戸":-488,"指":-3973,"政":-2013,"教":-1479,"数":3222,"文":-1489,"新":1764,"日":2099,"旧":5792,"昨":-661,"時":-1248,"曜":-951,"最":-937,"月":4125,"期":360,"李":3094,"村":364,"東":-805,"核":5156,"森":2438,"業":484,"氏":2613,"民":-1694,"決":-1073,"法":1868,"海":-495,"無":979,"物":461,"特":-3850,"生":-273,"用":914,"町":1215,"的":7313,"直":-1835,"省":792,"県":6293,"知":-1528,"私":4231,"税":401,"立":-960,"第":1201,"米":7767,"系":3066,"約":3663,"級":1384,"統":-4229,"総":1163,"線":1255,"者":6457,"能":725,"自":-2869,"英":785,"見":1044,"調":-562,"財":-733,"費":1777,"車":1835,"軍":1375,"込":-1504,"通":-1136,"選":-681,"郎":1026,"郡":4404,"部":1200,"金":2163,"長":421,"開":-1432,"間":1302,"関":-1282,"雨":2009,"電":-1045,"非":2066,"駅":1620,"1":-800,"」":2670,"・":-
3794,"ッ":-1350,"ア":551,"グ":1319,"ス":874,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278}; + this.UW4__ = {",":3930,".":3508,"―":-4841,"、":3930,"。":3508,"〇":4999,"「":1895,"」":3798,"〓":-5156,"あ":4752,"い":-3435,"う":-640,"え":-2514,"お":2405,"か":530,"が":6006,"き":-4482,"ぎ":-3821,"く":-3788,"け":-4376,"げ":-4734,"こ":2255,"ご":1979,"さ":2864,"し":-843,"じ":-2506,"す":-731,"ず":1251,"せ":181,"そ":4091,"た":5034,"だ":5408,"ち":-3654,"っ":-5882,"つ":-1659,"て":3994,"で":7410,"と":4547,"な":5433,"に":6499,"ぬ":1853,"ね":1413,"の":7396,"は":8578,"ば":1940,"ひ":4249,"び":-4134,"ふ":1345,"へ":6665,"べ":-744,"ほ":1464,"ま":1051,"み":-2082,"む":-882,"め":-5046,"も":4169,"ゃ":-2666,"や":2795,"ょ":-1544,"よ":3351,"ら":-2922,"り":-9726,"る":-14896,"れ":-2613,"ろ":-4570,"わ":-1783,"を":13150,"ん":-2352,"カ":2145,"コ":1789,"セ":1287,"ッ":-724,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637,"・":-4371,"ー":-11870,"一":-2069,"中":2210,"予":782,"事":-190,"井":-1768,"人":1036,"以":544,"会":950,"体":-1286,"作":530,"側":4292,"先":601,"党":-2006,"共":-1212,"内":584,"円":788,"初":1347,"前":1623,"副":3879,"力":-302,"動":-740,"務":-2715,"化":776,"区":4517,"協":1013,"参":1555,"合":-1834,"和":-681,"員":-910,"器":-851,"回":1500,"国":-619,"園":-1200,"地":866,"場":-1410,"塁":-2094,"士":-1413,"多":1067,"大":571,"子":-4802,"学":-1397,"定":-1057,"寺":-809,"小":1910,"屋":-1328,"山":-1500,"島":-2056,"川":-2667,"市":2771,"年":374,"庁":-4556,"後":456,"性":553,"感":916,"所":-1566,"支":856,"改":787,"政":2182,"教":704,"文":522,"方":-856,"日":1798,"時":1829,"最":845,"月":-9066,"木":-485,"来":-442,"校":-360,"業":-1043,"氏":5388,"民":-2716,"気":-910,"沢":-939,"済":-543,"物":-735,"率":672,"球":-1267,"生":-1286,"産":-1101,"田":-2900,"町":1826,"的":2586,"目":922,"省":-3485,"県":2997,"空":-867,"立":-2112,"第":788,"米":2937,"系":786,"約":2171,"経":1146,"統":-1169,"総":940,"線":-994,"署":749,"者":2145,"能":-730,"般":-852,"行":-792,"規":792,"警":-1184,"議":-244,"谷":-1000,"賞":730,"車":-1481,"軍":1158,"輪":-1433,"込":-3370,"近":929,"道":-1291,"選":2596,"郎":-4866,"都":1192,"野":-1100,"銀":-2213,"長":357,"間":-2344,"院":-2297,"際":-2604,"電":-878,"領":-1659,"題":-792,"館":-1984,"首":1749,"高":2120,"「":1895,"」":3798,"・":-4371,"ッ":-724,"ー":-11870,"カ":2145,"コ":1789,"セ":1287,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637}; + this.UW5__ = {",":465,".":-299,"1":-514,"E2":-32768,"]":-2762,"、":465,"。":-299,"「":363,"あ":1655,"い":331,"う":-503,"え":1199,"お":527,"か":647,"が":-421,"き":1624,"ぎ":1971,"く":312,"げ":-983,"さ":-1537,"し":-1371,"す":-852,"だ":-1186,"ち":1093,"っ":52,"つ":921,"て":-18,"で":-850,"と":-127,"ど":1682,"な":-787,"に":-1224,"の":-635,"は":-578,"べ":1001,"み":502,"め":865,"ゃ":3350,"ょ":854,"り":-208,"る":429,"れ":504,"わ":419,"を":-1264,"ん":327,"イ":241,"ル":451,"ン":-343,"中":-871,"京":722,"会":-1153,"党":-654,"務":3519,"区":-901,"告":848,"員":2104,"大":-1296,"学":-548,"定":1785,"嵐":-1304,"市":-2991,"席":921,"年":1763,"思":872,"所":-814,"挙":1618,"新":-1682,"日":218,"月":-4353,"査":932,"格":1356,"機":-1508,"氏":-1347,"田":240,"町":-3912,"的":-3149,"相":1319,"省":-1052,"県":-4003,"研":-997,"社":-278,"空":-813,"統":1955,"者":-2233,"表":663,"語":-1073,"議":1219,"選":-1018,"郎":-368,"長":786,"間":1191,"題":2368,"館":-689,"1":-514,"E2":-32768,"「":363,"イ":241,"ル":451,"ン":-343}; + this.UW6__ = {",":227,".":808,"1":-270,"E1":306,"、":227,"。":808,"あ":-307,"う":189,"か":241,"が":-73,"く":-121,"こ":-200,"じ":1782,"す":383,"た":-428,"っ":573,"て":-1014,"で":101,"と":-105,"な":-253,"に":-149,"の":-417,"は":-236,"も":-206,"り":187,"る":-135,"を":195,"ル":-673,"ン":-496,"一":-277,"中":201,"件":-800,"会":624,"前":302,"区":1792,"員":-1212,"委":798,"学":-960,"市":887,"広":-695,"後":535,"業":-697,"相":753,"社":-507,"福":974,"空":-822,"者":1811,"連":463,"郎":1082,"1":-270,"E1":306,"ル":-673,"ン":-496}; + + return this; + } + 
TinySegmenter.prototype.ctype_ = function(str) { + for (var i in this.chartype_) { + if (str.match(this.chartype_[i][0])) { + return this.chartype_[i][1]; + } + } + return "O"; + } + + TinySegmenter.prototype.ts_ = function(v) { + if (v) { return v; } + return 0; + } + + TinySegmenter.prototype.segment = function(input) { + if (input == null || input == undefined || input == "") { + return []; + } + var result = []; + var seg = ["B3","B2","B1"]; + var ctype = ["O","O","O"]; + var o = input.split(""); + for (i = 0; i < o.length; ++i) { + seg.push(o[i]); + ctype.push(this.ctype_(o[i])) + } + seg.push("E1"); + seg.push("E2"); + seg.push("E3"); + ctype.push("O"); + ctype.push("O"); + ctype.push("O"); + var word = seg[3]; + var p1 = "U"; + var p2 = "U"; + var p3 = "U"; + for (var i = 4; i < seg.length - 3; ++i) { + var score = this.BIAS__; + var w1 = seg[i-3]; + var w2 = seg[i-2]; + var w3 = seg[i-1]; + var w4 = seg[i]; + var w5 = seg[i+1]; + var w6 = seg[i+2]; + var c1 = ctype[i-3]; + var c2 = ctype[i-2]; + var c3 = ctype[i-1]; + var c4 = ctype[i]; + var c5 = ctype[i+1]; + var c6 = ctype[i+2]; + score += this.ts_(this.UP1__[p1]); + score += this.ts_(this.UP2__[p2]); + score += this.ts_(this.UP3__[p3]); + score += this.ts_(this.BP1__[p1 + p2]); + score += this.ts_(this.BP2__[p2 + p3]); + score += this.ts_(this.UW1__[w1]); + score += this.ts_(this.UW2__[w2]); + score += this.ts_(this.UW3__[w3]); + score += this.ts_(this.UW4__[w4]); + score += this.ts_(this.UW5__[w5]); + score += this.ts_(this.UW6__[w6]); + score += this.ts_(this.BW1__[w2 + w3]); + score += this.ts_(this.BW2__[w3 + w4]); + score += this.ts_(this.BW3__[w4 + w5]); + score += this.ts_(this.TW1__[w1 + w2 + w3]); + score += this.ts_(this.TW2__[w2 + w3 + w4]); + score += this.ts_(this.TW3__[w3 + w4 + w5]); + score += this.ts_(this.TW4__[w4 + w5 + w6]); + score += this.ts_(this.UC1__[c1]); + score += this.ts_(this.UC2__[c2]); + score += this.ts_(this.UC3__[c3]); + score += this.ts_(this.UC4__[c4]); + score += this.ts_(this.UC5__[c5]); + score += this.ts_(this.UC6__[c6]); + score += this.ts_(this.BC1__[c2 + c3]); + score += this.ts_(this.BC2__[c3 + c4]); + score += this.ts_(this.BC3__[c4 + c5]); + score += this.ts_(this.TC1__[c1 + c2 + c3]); + score += this.ts_(this.TC2__[c2 + c3 + c4]); + score += this.ts_(this.TC3__[c3 + c4 + c5]); + score += this.ts_(this.TC4__[c4 + c5 + c6]); + // score += this.ts_(this.TC5__[c4 + c5 + c6]); + score += this.ts_(this.UQ1__[p1 + c1]); + score += this.ts_(this.UQ2__[p2 + c2]); + score += this.ts_(this.UQ3__[p3 + c3]); + score += this.ts_(this.BQ1__[p2 + c2 + c3]); + score += this.ts_(this.BQ2__[p2 + c3 + c4]); + score += this.ts_(this.BQ3__[p3 + c2 + c3]); + score += this.ts_(this.BQ4__[p3 + c3 + c4]); + score += this.ts_(this.TQ1__[p2 + c1 + c2 + c3]); + score += this.ts_(this.TQ2__[p2 + c2 + c3 + c4]); + score += this.ts_(this.TQ3__[p3 + c1 + c2 + c3]); + score += this.ts_(this.TQ4__[p3 + c2 + c3 + c4]); + var p = "O"; + if (score > 0) { + result.push(word); + word = ""; + p = "B"; + } + p1 = p2; + p2 = p3; + p3 = p; + word += seg[i]; + } + result.push(word); + + return result; + } + + lunr.TinySegmenter = TinySegmenter; + }; + +})); \ No newline at end of file diff --git a/0.2/assets/javascripts/lunr/tinyseg.min.js b/0.2/assets/javascripts/lunr/tinyseg.min.js deleted file mode 100644 index 302befbb..00000000 --- a/0.2/assets/javascripts/lunr/tinyseg.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(_,t){"function"==typeof define&&define.amd?define(t):"object"==typeof 
exports?module.exports=t():t()(_.lunr)}(this,(function(){return function(_){function t(){var _={"[一二三四五六七八九十百千万億兆]":"M","[一-龠々〆ヵヶ]":"H","[ぁ-ん]":"I","[ァ-ヴーア-ン゙ー]":"K","[a-zA-Za-zA-Z]":"A","[0-90-9]":"N"};for(var t in this.chartype_=[],_){var H=new RegExp(t);this.chartype_.push([H,_[t]])}return this.BIAS__=-332,this.BC1__={HH:6,II:2461,KH:406,OH:-1378},this.BC2__={AA:-3267,AI:2744,AN:-878,HH:-4070,HM:-1711,HN:4012,HO:3761,IA:1327,IH:-1184,II:-1332,IK:1721,IO:5492,KI:3831,KK:-8741,MH:-3132,MK:3334,OO:-2920},this.BC3__={HH:996,HI:626,HK:-721,HN:-1307,HO:-836,IH:-301,KK:2762,MK:1079,MM:4034,OA:-1652,OH:266},this.BP1__={BB:295,OB:304,OO:-125,UB:352},this.BP2__={BO:60,OO:-1762},this.BQ1__={BHH:1150,BHM:1521,BII:-1158,BIM:886,BMH:1208,BNH:449,BOH:-91,BOO:-2597,OHI:451,OIH:-296,OKA:1851,OKH:-1020,OKK:904,OOO:2965},this.BQ2__={BHH:118,BHI:-1159,BHM:466,BIH:-919,BKK:-1720,BKO:864,OHH:-1139,OHM:-181,OIH:153,UHI:-1146},this.BQ3__={BHH:-792,BHI:2664,BII:-299,BKI:419,BMH:937,BMM:8335,BNN:998,BOH:775,OHH:2174,OHM:439,OII:280,OKH:1798,OKI:-793,OKO:-2242,OMH:-2402,OOO:11699},this.BQ4__={BHH:-3895,BIH:3761,BII:-4654,BIK:1348,BKK:-1806,BMI:-3385,BOO:-12396,OAH:926,OHH:266,OHK:-2036,ONN:-973},this.BW1__={",と":660,",同":727,"B1あ":1404,"B1同":542,"、と":660,"、同":727,"」と":1682,"あっ":1505,"いう":1743,"いっ":-2055,"いる":672,"うし":-4817,"うん":665,"から":3472,"がら":600,"こう":-790,"こと":2083,"こん":-1262,"さら":-4143,"さん":4573,"した":2641,"して":1104,"すで":-3399,"そこ":1977,"それ":-871,"たち":1122,"ため":601,"った":3463,"つい":-802,"てい":805,"てき":1249,"でき":1127,"です":3445,"では":844,"とい":-4915,"とみ":1922,"どこ":3887,"ない":5713,"なっ":3015,"など":7379,"なん":-1113,"にし":2468,"には":1498,"にも":1671,"に対":-912,"の一":-501,"の中":741,"ませ":2448,"まで":1711,"まま":2600,"まる":-2155,"やむ":-1947,"よっ":-2565,"れた":2369,"れで":-913,"をし":1860,"を見":731,"亡く":-1886,"京都":2558,"取り":-2784,"大き":-2604,"大阪":1497,"平方":-2314,"引き":-1336,"日本":-195,"本当":-2423,"毎日":-2113,"目指":-724,"B1あ":1404,"B1同":542,"」と":1682},this.BW2__={"..":-11822,11:-669,"――":-5730,"−−":-13175,"いう":-1609,"うか":2490,"かし":-1350,"かも":-602,"から":-7194,"かれ":4612,"がい":853,"がら":-3198,"きた":1941,"くな":-1597,"こと":-8392,"この":-4193,"させ":4533,"され":13168,"さん":-3977,"しい":-1819,"しか":-545,"した":5078,"して":972,"しな":939,"その":-3744,"たい":-1253,"たた":-662,"ただ":-3857,"たち":-786,"たと":1224,"たは":-939,"った":4589,"って":1647,"っと":-2094,"てい":6144,"てき":3640,"てく":2551,"ては":-3110,"ても":-3065,"でい":2666,"でき":-1528,"でし":-3828,"です":-4761,"でも":-4203,"とい":1890,"とこ":-1746,"とと":-2279,"との":720,"とみ":5168,"とも":-3941,"ない":-2488,"なが":-1313,"など":-6509,"なの":2614,"なん":3099,"にお":-1615,"にし":2748,"にな":2454,"によ":-7236,"に対":-14943,"に従":-4688,"に関":-11388,"のか":2093,"ので":-7059,"のに":-6041,"のの":-6125,"はい":1073,"はが":-1033,"はず":-2532,"ばれ":1813,"まし":-1316,"まで":-6621,"まれ":5409,"めて":-3153,"もい":2230,"もの":-10713,"らか":-944,"らし":-1611,"らに":-1897,"りし":651,"りま":1620,"れた":4270,"れて":849,"れば":4114,"ろう":6067,"われ":7901,"を通":-11877,"んだ":728,"んな":-4115,"一人":602,"一方":-1375,"一日":970,"一部":-1051,"上が":-4479,"会社":-1116,"出て":2163,"分の":-7758,"同党":970,"同日":-913,"大阪":-2471,"委員":-1250,"少な":-1050,"年度":-8669,"年間":-1626,"府県":-2363,"手権":-1982,"新聞":-4066,"日新":-722,"日本":-7068,"日米":3372,"曜日":-601,"朝鮮":-2355,"本人":-2697,"東京":-1543,"然と":-1384,"社会":-1276,"立て":-990,"第に":-1612,"米国":-4268,"11":-669},this.BW3__={"あた":-2194,"あり":719,"ある":3846,"い.":-1185,"い。":-1185,"いい":5308,"いえ":2079,"いく":3029,"いた":2056,"いっ":1883,"いる":5600,"いわ":1527,"うち":1117,"うと":4798,"えと":1454,"か.":2857,"か。":2857,"かけ":-743,"かっ":-4098,"かに":-669,"から":6520,"かり":-2670,"が,":1816,"が、":1816,"がき":-4855,"がけ":-1127,"がっ":-913,"がら":-4977,"がり":-2064,"きた":1645,"けど":1374,"こと":7397,"この":1542,"ころ":-275
7,"さい":-714,"さを":976,"し,":1557,"し、":1557,"しい":-3714,"した":3562,"して":1449,"しな":2608,"しま":1200,"す.":-1310,"す。":-1310,"する":6521,"ず,":3426,"ず、":3426,"ずに":841,"そう":428,"た.":8875,"た。":8875,"たい":-594,"たの":812,"たり":-1183,"たる":-853,"だ.":4098,"だ。":4098,"だっ":1004,"った":-4748,"って":300,"てい":6240,"てお":855,"ても":302,"です":1437,"でに":-1482,"では":2295,"とう":-1387,"とし":2266,"との":541,"とも":-3543,"どう":4664,"ない":1796,"なく":-903,"など":2135,"に,":-1021,"に、":-1021,"にし":1771,"にな":1906,"には":2644,"の,":-724,"の、":-724,"の子":-1e3,"は,":1337,"は、":1337,"べき":2181,"まし":1113,"ます":6943,"まっ":-1549,"まで":6154,"まれ":-793,"らし":1479,"られ":6820,"るる":3818,"れ,":854,"れ、":854,"れた":1850,"れて":1375,"れば":-3246,"れる":1091,"われ":-605,"んだ":606,"んで":798,"カ月":990,"会議":860,"入り":1232,"大会":2217,"始め":1681,"市":965,"新聞":-5055,"日,":974,"日、":974,"社会":2024,"カ月":990},this.TC1__={AAA:1093,HHH:1029,HHM:580,HII:998,HOH:-390,HOM:-331,IHI:1169,IOH:-142,IOI:-1015,IOM:467,MMH:187,OOI:-1832},this.TC2__={HHO:2088,HII:-1023,HMM:-1154,IHI:-1965,KKH:703,OII:-2649},this.TC3__={AAA:-294,HHH:346,HHI:-341,HII:-1088,HIK:731,HOH:-1486,IHH:128,IHI:-3041,IHO:-1935,IIH:-825,IIM:-1035,IOI:-542,KHH:-1216,KKA:491,KKH:-1217,KOK:-1009,MHH:-2694,MHM:-457,MHO:123,MMH:-471,NNH:-1689,NNO:662,OHO:-3393},this.TC4__={HHH:-203,HHI:1344,HHK:365,HHM:-122,HHN:182,HHO:669,HIH:804,HII:679,HOH:446,IHH:695,IHO:-2324,IIH:321,III:1497,IIO:656,IOO:54,KAK:4845,KKA:3386,KKK:3065,MHH:-405,MHI:201,MMH:-241,MMM:661,MOM:841},this.TQ1__={BHHH:-227,BHHI:316,BHIH:-132,BIHH:60,BIII:1595,BNHH:-744,BOHH:225,BOOO:-908,OAKK:482,OHHH:281,OHIH:249,OIHI:200,OIIH:-68},this.TQ2__={BIHH:-1401,BIII:-1033,BKAK:-543,BOOO:-5591},this.TQ3__={BHHH:478,BHHM:-1073,BHIH:222,BHII:-504,BIIH:-116,BIII:-105,BMHI:-863,BMHM:-464,BOMH:620,OHHH:346,OHHI:1729,OHII:997,OHMH:481,OIHH:623,OIIH:1344,OKAK:2792,OKHH:587,OKKA:679,OOHH:110,OOII:-685},this.TQ4__={BHHH:-721,BHHM:-3604,BHII:-966,BIIH:-607,BIII:-2181,OAAA:-2763,OAKK:180,OHHH:-294,OHHI:2446,OHHO:480,OHIH:-1573,OIHH:1935,OIHI:-493,OIIH:626,OIII:-4007,OKAK:-8156},this.TW1__={"につい":-4681,"東京都":2026},this.TW2__={"ある程":-2049,"いった":-1256,"ころが":-2434,"しょう":3873,"その後":-4430,"だって":-1049,"ていた":1833,"として":-4657,"ともに":-4517,"もので":1882,"一気に":-792,"初めて":-1512,"同時に":-8097,"大きな":-1255,"対して":-2721,"社会党":-3216},this.TW3__={"いただ":-1734,"してい":1314,"として":-4314,"につい":-5483,"にとっ":-5989,"に当た":-6247,"ので,":-727,"ので、":-727,"のもの":-600,"れから":-3752,"十二月":-2287},this.TW4__={"いう.":8576,"いう。":8576,"からな":-2348,"してい":2958,"たが,":1516,"たが、":1516,"ている":1538,"という":1349,"ました":5543,"ません":1097,"ようと":-4258,"よると":5865},this.UC1__={A:484,K:93,M:645,O:-505},this.UC2__={A:819,H:1059,I:409,M:3987,N:5775,O:646},this.UC3__={A:-1370,I:2311},this.UC4__={A:-2643,H:1809,I:-1032,K:-3450,M:3565,N:3876,O:6646},this.UC5__={H:313,I:-1238,K:-799,M:539,O:-831},this.UC6__={H:-506,I:-253,K:87,M:247,O:-387},this.UP1__={O:-214},this.UP2__={B:69,O:935},this.UP3__={B:189},this.UQ1__={BH:21,BI:-12,BK:-99,BN:142,BO:-56,OH:-95,OI:477,OK:410,OO:-2422},this.UQ2__={BH:216,BI:113,OK:1759},this.UQ3__={BA:-479,BH:42,BI:1913,BK:-7198,BM:3160,BN:6427,BO:14761,OI:-827,ON:-3212},this.UW1__={",":156,"、":156,"「":-463,"あ":-941,"う":-127,"が":-553,"き":121,"こ":505,"で":-201,"と":-547,"ど":-123,"に":-789,"の":-185,"は":-847,"も":-466,"や":-470,"よ":182,"ら":-292,"り":208,"れ":169,"を":-446,"ん":-137,"・":-135,"主":-402,"京":-268,"区":-912,"午":871,"国":-460,"大":561,"委":729,"市":-411,"日":-141,"理":361,"生":-408,"県":-386,"都":-718,"「":-463,"・":-135},this.UW2__={",":-829,"、":-829,"〇":892,"「":-645,"」":3145,"あ":-538,"い":505,"う":134,"お":-502,"か":1454,"が":-856,"く":-412,"こ":1141,"さ":878,"ざ":540,"し":1529,"す":-675,"せ"
:300,"そ":-1011,"た":188,"だ":1837,"つ":-949,"て":-291,"で":-268,"と":-981,"ど":1273,"な":1063,"に":-1764,"の":130,"は":-409,"ひ":-1273,"べ":1261,"ま":600,"も":-1263,"や":-402,"よ":1639,"り":-579,"る":-694,"れ":571,"を":-2516,"ん":2095,"ア":-587,"カ":306,"キ":568,"ッ":831,"三":-758,"不":-2150,"世":-302,"中":-968,"主":-861,"事":492,"人":-123,"会":978,"保":362,"入":548,"初":-3025,"副":-1566,"北":-3414,"区":-422,"大":-1769,"天":-865,"太":-483,"子":-1519,"学":760,"実":1023,"小":-2009,"市":-813,"年":-1060,"強":1067,"手":-1519,"揺":-1033,"政":1522,"文":-1355,"新":-1682,"日":-1815,"明":-1462,"最":-630,"朝":-1843,"本":-1650,"東":-931,"果":-665,"次":-2378,"民":-180,"気":-1740,"理":752,"発":529,"目":-1584,"相":-242,"県":-1165,"立":-763,"第":810,"米":509,"自":-1353,"行":838,"西":-744,"見":-3874,"調":1010,"議":1198,"込":3041,"開":1758,"間":-1257,"「":-645,"」":3145,"ッ":831,"ア":-587,"カ":306,"キ":568},this.UW3__={",":4889,1:-800,"−":-1723,"、":4889,"々":-2311,"〇":5827,"」":2670,"〓":-3573,"あ":-2696,"い":1006,"う":2342,"え":1983,"お":-4864,"か":-1163,"が":3271,"く":1004,"け":388,"げ":401,"こ":-3552,"ご":-3116,"さ":-1058,"し":-395,"す":584,"せ":3685,"そ":-5228,"た":842,"ち":-521,"っ":-1444,"つ":-1081,"て":6167,"で":2318,"と":1691,"ど":-899,"な":-2788,"に":2745,"の":4056,"は":4555,"ひ":-2171,"ふ":-1798,"へ":1199,"ほ":-5516,"ま":-4384,"み":-120,"め":1205,"も":2323,"や":-788,"よ":-202,"ら":727,"り":649,"る":5905,"れ":2773,"わ":-1207,"を":6620,"ん":-518,"ア":551,"グ":1319,"ス":874,"ッ":-1350,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278,"・":-3794,"一":-1619,"下":-1759,"世":-2087,"両":3815,"中":653,"主":-758,"予":-1193,"二":974,"人":2742,"今":792,"他":1889,"以":-1368,"低":811,"何":4265,"作":-361,"保":-2439,"元":4858,"党":3593,"全":1574,"公":-3030,"六":755,"共":-1880,"円":5807,"再":3095,"分":457,"初":2475,"別":1129,"前":2286,"副":4437,"力":365,"動":-949,"務":-1872,"化":1327,"北":-1038,"区":4646,"千":-2309,"午":-783,"協":-1006,"口":483,"右":1233,"各":3588,"合":-241,"同":3906,"和":-837,"員":4513,"国":642,"型":1389,"場":1219,"外":-241,"妻":2016,"学":-1356,"安":-423,"実":-1008,"家":1078,"小":-513,"少":-3102,"州":1155,"市":3197,"平":-1804,"年":2416,"広":-1030,"府":1605,"度":1452,"建":-2352,"当":-3885,"得":1905,"思":-1291,"性":1822,"戸":-488,"指":-3973,"政":-2013,"教":-1479,"数":3222,"文":-1489,"新":1764,"日":2099,"旧":5792,"昨":-661,"時":-1248,"曜":-951,"最":-937,"月":4125,"期":360,"李":3094,"村":364,"東":-805,"核":5156,"森":2438,"業":484,"氏":2613,"民":-1694,"決":-1073,"法":1868,"海":-495,"無":979,"物":461,"特":-3850,"生":-273,"用":914,"町":1215,"的":7313,"直":-1835,"省":792,"県":6293,"知":-1528,"私":4231,"税":401,"立":-960,"第":1201,"米":7767,"系":3066,"約":3663,"級":1384,"統":-4229,"総":1163,"線":1255,"者":6457,"能":725,"自":-2869,"英":785,"見":1044,"調":-562,"財":-733,"費":1777,"車":1835,"軍":1375,"込":-1504,"通":-1136,"選":-681,"郎":1026,"郡":4404,"部":1200,"金":2163,"長":421,"開":-1432,"間":1302,"関":-1282,"雨":2009,"電":-1045,"非":2066,"駅":1620,"1":-800,"」":2670,"・":-3794,"ッ":-1350,"ア":551,"グ":1319,"ス":874,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278},this.UW4__={",":3930,".":3508,"―":-4841,"、":3930,"。":3508,"〇":4999,"「":1895,"」":3798,"〓":-5156,"あ":4752,"い":-3435,"う":-640,"え":-2514,"お":2405,"か":530,"が":6006,"き":-4482,"ぎ":-3821,"く":-3788,"け":-4376,"げ":-4734,"こ":2255,"ご":1979,"さ":2864,"し":-843,"じ":-2506,"す":-731,"ず":1251,"せ":181,"そ":4091,"た":5034,"だ":5408,"ち":-3654,"っ":-5882,"つ":-1659,"て":3994,"で":7410,"と":4547,"な":5433,"に":6499,"ぬ":1853,"ね":1413,"の":7396,"は":8578,"ば":1940,"ひ":4249,"び":-4134,"ふ":1345,"へ":6665,"べ":-744,"ほ":1464,"ま":1051,"み":-2082,"む":-882,"め":-5046,"も":4169,"ゃ":-2666,"や":2795,"ょ":-1544,"よ":3351,"ら":-2922,"り":-9726,"る":-14896,"れ":-2613,"ろ":-4570,"わ":-1783,"を":13150,"ん":-2352,"カ":2145,"コ":1789,"セ":1287,"ッ":-724,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637,"・":-4371,
"ー":-11870,"一":-2069,"中":2210,"予":782,"事":-190,"井":-1768,"人":1036,"以":544,"会":950,"体":-1286,"作":530,"側":4292,"先":601,"党":-2006,"共":-1212,"内":584,"円":788,"初":1347,"前":1623,"副":3879,"力":-302,"動":-740,"務":-2715,"化":776,"区":4517,"協":1013,"参":1555,"合":-1834,"和":-681,"員":-910,"器":-851,"回":1500,"国":-619,"園":-1200,"地":866,"場":-1410,"塁":-2094,"士":-1413,"多":1067,"大":571,"子":-4802,"学":-1397,"定":-1057,"寺":-809,"小":1910,"屋":-1328,"山":-1500,"島":-2056,"川":-2667,"市":2771,"年":374,"庁":-4556,"後":456,"性":553,"感":916,"所":-1566,"支":856,"改":787,"政":2182,"教":704,"文":522,"方":-856,"日":1798,"時":1829,"最":845,"月":-9066,"木":-485,"来":-442,"校":-360,"業":-1043,"氏":5388,"民":-2716,"気":-910,"沢":-939,"済":-543,"物":-735,"率":672,"球":-1267,"生":-1286,"産":-1101,"田":-2900,"町":1826,"的":2586,"目":922,"省":-3485,"県":2997,"空":-867,"立":-2112,"第":788,"米":2937,"系":786,"約":2171,"経":1146,"統":-1169,"総":940,"線":-994,"署":749,"者":2145,"能":-730,"般":-852,"行":-792,"規":792,"警":-1184,"議":-244,"谷":-1e3,"賞":730,"車":-1481,"軍":1158,"輪":-1433,"込":-3370,"近":929,"道":-1291,"選":2596,"郎":-4866,"都":1192,"野":-1100,"銀":-2213,"長":357,"間":-2344,"院":-2297,"際":-2604,"電":-878,"領":-1659,"題":-792,"館":-1984,"首":1749,"高":2120,"「":1895,"」":3798,"・":-4371,"ッ":-724,"ー":-11870,"カ":2145,"コ":1789,"セ":1287,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637},this.UW5__={",":465,".":-299,1:-514,E2:-32768,"]":-2762,"、":465,"。":-299,"「":363,"あ":1655,"い":331,"う":-503,"え":1199,"お":527,"か":647,"が":-421,"き":1624,"ぎ":1971,"く":312,"げ":-983,"さ":-1537,"し":-1371,"す":-852,"だ":-1186,"ち":1093,"っ":52,"つ":921,"て":-18,"で":-850,"と":-127,"ど":1682,"な":-787,"に":-1224,"の":-635,"は":-578,"べ":1001,"み":502,"め":865,"ゃ":3350,"ょ":854,"り":-208,"る":429,"れ":504,"わ":419,"を":-1264,"ん":327,"イ":241,"ル":451,"ン":-343,"中":-871,"京":722,"会":-1153,"党":-654,"務":3519,"区":-901,"告":848,"員":2104,"大":-1296,"学":-548,"定":1785,"嵐":-1304,"市":-2991,"席":921,"年":1763,"思":872,"所":-814,"挙":1618,"新":-1682,"日":218,"月":-4353,"査":932,"格":1356,"機":-1508,"氏":-1347,"田":240,"町":-3912,"的":-3149,"相":1319,"省":-1052,"県":-4003,"研":-997,"社":-278,"空":-813,"統":1955,"者":-2233,"表":663,"語":-1073,"議":1219,"選":-1018,"郎":-368,"長":786,"間":1191,"題":2368,"館":-689,"1":-514,"E2":-32768,"「":363,"イ":241,"ル":451,"ン":-343},this.UW6__={",":227,".":808,1:-270,E1:306,"、":227,"。":808,"あ":-307,"う":189,"か":241,"が":-73,"く":-121,"こ":-200,"じ":1782,"す":383,"た":-428,"っ":573,"て":-1014,"で":101,"と":-105,"な":-253,"に":-149,"の":-417,"は":-236,"も":-206,"り":187,"る":-135,"を":195,"ル":-673,"ン":-496,"一":-277,"中":201,"件":-800,"会":624,"前":302,"区":1792,"員":-1212,"委":798,"学":-960,"市":887,"広":-695,"後":535,"業":-697,"相":753,"社":-507,"福":974,"空":-822,"者":1811,"連":463,"郎":1082,"1":-270,"E1":306,"ル":-673,"ン":-496},this}t.prototype.ctype_=function(_){for(var t in this.chartype_)if(_.match(this.chartype_[t][0]))return this.chartype_[t][1];return"O"},t.prototype.ts_=function(_){return _||0},t.prototype.segment=function(_){if(null==_||null==_||""==_)return[];var t=[],H=["B3","B2","B1"],s=["O","O","O"],h=_.split("");for(K=0;K0&&(t.push(i),i="",N="B"),I=O,O=B,B=N,i+=H[K]}return t.push(i),t},_.TinySegmenter=t}})); \ No newline at end of file diff --git a/0.2/assets/javascripts/vendor.12f39d2a.min.js b/0.2/assets/javascripts/vendor.12f39d2a.min.js deleted file mode 100644 index 6d198607..00000000 --- a/0.2/assets/javascripts/vendor.12f39d2a.min.js +++ /dev/null @@ -1,15 +0,0 @@ -(window.webpackJsonp=window.webpackJsonp||[]).push([[1],[function(t,e,n){"use strict";n.d(e,"a",(function(){return i})),n.d(e,"b",(function(){return o}));var r=n(2);function i(t){return Object(r.a)(null==t?void 0:t.lift)}function 
o(t){return e=>{if(i(e))return e.lift((function(e){try{return t(e,this)}catch(t){this.error(t)}}));throw new TypeError("Unable to lift unknown Observable type")}}},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(16);class i extends r.b{constructor(t,e,n,r,i){super(t),this.onUnsubscribe=i,e&&(this._next=function(t){try{e(t)}catch(t){this.destination.error(t)}}),n&&(this._error=function(t){try{n(t)}catch(t){this.destination.error(t)}this.unsubscribe()}),r&&(this._complete=function(){try{r()}catch(t){this.destination.error(t)}this.unsubscribe()})}unsubscribe(){var t;!this.closed&&(null===(t=this.onUnsubscribe)||void 0===t||t.call(this)),super.unsubscribe()}}},function(t,e,n){"use strict";function r(t){return"function"==typeof t}n.d(e,"a",(function(){return r}))},,,function(t,e,n){"use strict";n.d(e,"a",(function(){return l}));var r=n(16),i=n(7),o=n(14),s=n(27),c=n(13),u=n(18),a=n(2);class l{constructor(t){t&&(this._subscribe=t)}lift(t){const e=new l;return e.source=this,e.operator=t,e}subscribe(t,e,n){const o=(s=t)&&s instanceof r.b||function(t){return t&&Object(a.a)(t.next)&&Object(a.a)(t.error)&&Object(a.a)(t.complete)}(s)&&Object(i.c)(s)?t:new r.a(t,e,n);var s;const{operator:u,source:l}=this;return o.add(u?u.call(o,l):l||c.a.useDeprecatedSynchronousErrorHandling?this._subscribe(o):this._trySubscribe(o)),o}_trySubscribe(t){try{return this._subscribe(t)}catch(e){if(c.a.useDeprecatedSynchronousErrorHandling)throw e;!function(t){for(;t;){const{closed:e,destination:n,isStopped:i}=t;if(e||i)return!1;t=n&&n instanceof r.b?n:null}return!0}(t)?Object(u.a)(e):t.error(e)}}forEach(t,e){return new(e=d(e))((e,n)=>{let r;r=this.subscribe(e=>{try{t(e)}catch(t){n(t),null==r||r.unsubscribe()}},n,e)})}_subscribe(t){var e;return null===(e=this.source)||void 0===e?void 0:e.subscribe(t)}[o.a](){return this}pipe(...t){return t.length?Object(s.b)(t)(this):this}toPromise(t){return new(t=d(t))((t,e)=>{let n;this.subscribe(t=>n=t,t=>e(t),()=>t(n))})}}function d(t){var e;return null!==(e=null!=t?t:c.a.Promise)&&void 0!==e?e:Promise}l.create=t=>new l(t)},function(t,e,n){"use strict";n.d(e,"a",(function(){return g})),n.d(e,"c",(function(){return _})),n.d(e,"b",(function(){return O}));function r(t,e,n,r){return new(n||(n=Promise))((function(i,o){function s(t){try{u(r.next(t))}catch(t){o(t)}}function c(t){try{u(r.throw(t))}catch(t){o(t)}}function u(t){var e;t.done?i(t.value):(e=t.value,e instanceof n?e:new n((function(t){t(e)}))).then(s,c)}u((r=r.apply(t,e||[])).next())}))}Object.create;function i(t){var e="function"==typeof Symbol&&Symbol.iterator,n=e&&t[e],r=0;if(n)return n.call(t);if(t&&"number"==typeof t.length)return{next:function(){return t&&r>=t.length&&(t=void 0),{value:t&&t[r++],done:!t}}};throw new TypeError(e?"Object is not iterable.":"Symbol.iterator is not defined.")}function o(t){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var e,n=t[Symbol.asyncIterator];return n?n.call(t):(t=i(t),e={},r("next"),r("throw"),r("return"),e[Symbol.asyncIterator]=function(){return this},e);function r(n){e[n]=t[n]&&function(e){return new Promise((function(r,i){(function(t,e,n,r){Promise.resolve(r).then((function(e){t({value:e,done:n})}),e)})(r,i,(e=t[n](e)).done,e.value)}))}}}Object.create;var s=n(20),c=n(2);function u(t){return Object(c.a)(null==t?void 0:t.then)}const a="function"==typeof Symbol&&Symbol.iterator?Symbol.iterator:"@@iterator";var l=n(14),d=n(5),f=n(7);var h=n(21);function b(t){return Object(c.a)(t[l.a])}function v(t){return Object(c.a)(null==t?void 
0:t[a])}function p(t){return Symbol.asyncIterator&&Object(c.a)(null==t?void 0:t[Symbol.asyncIterator])}function m(t){return new TypeError(`You provided ${null!==t&&"object"==typeof t?"an invalid object":`'${t}'`} where a stream was expected. You can provide an Observable, Promise, Array, AsyncIterable, or Iterable.`)}function y(t,e){if(null!=t){if(b(t))return function(t,e){return new d.a(n=>{const r=new f.b;return r.add(e.schedule(()=>{const i=t[l.a]();r.add(i.subscribe({next(t){r.add(e.schedule(()=>n.next(t)))},error(t){r.add(e.schedule(()=>n.error(t)))},complete(){r.add(e.schedule(()=>n.complete()))}}))})),r})}(t,e);if(Object(s.a)(t))return Object(h.a)(t,e);if(u(t))return function(t,e){return new d.a(n=>e.schedule(()=>t.then(t=>{n.add(e.schedule(()=>{n.next(t),n.add(e.schedule(()=>n.complete()))}))},t=>{n.add(e.schedule(()=>n.error(t)))})))}(t,e);if(p(t))return function(t,e){if(!t)throw new Error("Iterable cannot be null");return new d.a(n=>{const r=new f.b;return r.add(e.schedule(()=>{const i=t[Symbol.asyncIterator]();r.add(e.schedule((function(){i.next().then(t=>{t.done?n.complete():(n.next(t.value),this.schedule())})})))})),r})}(t,e);if(v(t))return function(t,e){return new d.a(n=>{let r;return n.add(e.schedule(()=>{r=t[a](),function(t,e,n,r=0){const i=e.schedule((function(){try{n.call(this)}catch(e){t.error(e)}}),r);t.add(i)}(n,e,(function(){const{value:t,done:e}=r.next();e?n.complete():(n.next(t),this.schedule())}))})),()=>Object(c.a)(null==r?void 0:r.return)&&r.return()})}(t,e)}throw m(t)}var w=n(18);function g(t,e){return e?y(t,e):_(t)}function _(t){if(t instanceof d.a)return t;if(null!=t){if(b(t))return f=t,new d.a(t=>{const e=f[l.a]();if(Object(c.a)(e.subscribe))return e.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")});if(Object(s.a)(t))return O(t);if(u(t))return i=t,new d.a(t=>{i.then(e=>{t.closed||(t.next(e),t.complete())},e=>t.error(e)).then(null,w.a)});if(p(t))return n=t,new d.a(t=>{(function(t,e){var n,i,s,c;return r(this,void 0,void 0,(function*(){try{for(n=o(t);!(i=yield n.next()).done;){const t=i.value;e.next(t)}}catch(t){s={error:t}}finally{try{i&&!i.done&&(c=n.return)&&(yield c.call(n))}finally{if(s)throw s.error}}e.complete()}))})(n,t).catch(e=>t.error(e))});if(v(t))return e=t,new d.a(t=>{const n=e[a]();for(;!t.closed;){const{done:e,value:r}=n.next();e?t.complete():t.next(r)}return()=>Object(c.a)(null==n?void 0:n.return)&&n.return()})}var e,n,i,f;throw m(t)}function O(t){return new d.a(e=>{for(let n=0;nfunction(e){t(this),this.message=e?`${e.length} errors occurred during unsubscription:\n${e.map((t,e)=>`${e+1}) ${t.toString()}`).join("\n ")}`:"",this.name="UnsubscriptionError",this.errors=e});var s=n(11);class c{constructor(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._teardowns=null}unsubscribe(){let t;if(!this.closed){this.closed=!0;const{_parentage:e}=this;if(Array.isArray(e))for(const t of e)t.remove(this);else null==e||e.remove(this);const{initialTeardown:n}=this;if(Object(r.a)(n))try{n()}catch(e){t=e instanceof o?e.errors:[e]}const{_teardowns:i}=this;if(i){this._teardowns=null;for(const e of i)try{l(e)}catch(e){t=null!=t?t:[],e instanceof o?t=[...t,...e.errors]:t.push(e)}}if(t)throw new o(t)}}add(t){var e;if(t&&t!==this)if(this.closed)l(t);else{if(t instanceof c){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._teardowns=null!==(e=this._teardowns)&&void 0!==e?e:[]).push(t)}}_hasParent(t){const{_parentage:e}=this;return 
e===t||Array.isArray(e)&&e.includes(t)}_addParent(t){const{_parentage:e}=this;this._parentage=Array.isArray(e)?(e.push(t),e):e?[e,t]:t}_removeParent(t){const{_parentage:e}=this;e===t?this._parentage=null:Array.isArray(e)&&Object(s.a)(e,t)}remove(t){const{_teardowns:e}=this;e&&Object(s.a)(e,t),t instanceof c&&t._removeParent(this)}}c.EMPTY=(()=>{const t=new c;return t.closed=!0,t})();const u=c.EMPTY;function a(t){return t instanceof c||t&&"closed"in t&&Object(r.a)(t.remove)&&Object(r.a)(t.add)&&Object(r.a)(t.unsubscribe)}function l(t){Object(r.a)(t)?t():t.unsubscribe()}},function(t,e,n){"use strict";n.d(e,"b",(function(){return s})),n.d(e,"c",(function(){return c})),n.d(e,"a",(function(){return u}));var r=n(2),i=n(30);function o(t){return t[t.length-1]}function s(t){return Object(r.a)(o(t))?t.pop():void 0}function c(t){return Object(i.a)(o(t))?t.pop():void 0}function u(t,e){return"number"==typeof o(t)?t.pop():e}},function(t,e,n){"use strict";function r(t){return t}n.d(e,"a",(function(){return r}))},function(t,e,n){"use strict";function r(){}n.d(e,"a",(function(){return r}))},function(t,e,n){"use strict";function r(t,e){if(t){const n=t.indexOf(e);0<=n&&t.splice(n,1)}}n.d(e,"a",(function(){return r}))},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(5);const i=new r.a(t=>t.complete())},function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));const r={onUnhandledError:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1}},function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));const r="function"==typeof Symbol&&Symbol.observable||"@@observable"},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(21),i=n(6);function o(t,e){return e?Object(r.a)(t,e):Object(i.b)(t)}},function(t,e,n){"use strict";n.d(e,"b",(function(){return u})),n.d(e,"a",(function(){return a}));var r=n(2),i=n(7),o=n(13),s=n(18),c=n(10);class u extends i.b{constructor(t){super(),this.isStopped=!1,t?(this.destination=t,Object(i.c)(t)&&t.add(this)):this.destination=d}static create(t,e,n){return new a(t,e,n)}next(t){this.isStopped||this._next(t)}error(t){this.isStopped||(this.isStopped=!0,this._error(t))}complete(){this.isStopped||(this.isStopped=!0,this._complete())}unsubscribe(){this.closed||(this.isStopped=!0,super.unsubscribe())}_next(t){this.destination.next(t)}_error(t){this.destination.error(t),this.unsubscribe()}_complete(){this.destination.complete(),this.unsubscribe()}}class a extends u{constructor(t,e,n){if(super(),this.destination=d,(t||e||n)&&t!==d){let i;if(Object(r.a)(t))i=t;else if(t){let r;({next:i,error:e,complete:n}=t),this&&o.a.useDeprecatedNextContext?(r=Object.create(t),r.unsubscribe=()=>this.unsubscribe()):r=t,i=null==i?void 0:i.bind(r),e=null==e?void 0:e.bind(r),n=null==n?void 0:n.bind(r)}this.destination={next:i||c.a,error:e||l,complete:n||c.a}}}}function l(t){if(o.a.useDeprecatedSynchronousErrorHandling)throw t;Object(s.a)(t)}const d={closed:!0,next:c.a,error:l,complete:c.a}},function(t,e,n){"use strict";n.d(e,"a",(function(){return u}));var r=n(29),i=n(6),o=n(0),s=n(1);var c=n(2);function u(t,e,n=1/0){return Object(c.a)(e)?u((n,o)=>Object(r.a)((t,r)=>e(n,t,o,r))(Object(i.c)(t(n,o))),n):("number"==typeof e&&(n=e),Object(o.b)((e,r)=>function(t,e,n,r,o,c,u,a){let l=[],d=0,f=0,h=!1;const b=()=>{!h||l.length||d||e.complete()},v=t=>d{c&&e.next(t),d++,Object(i.c)(n(t,f++)).subscribe(new s.a(e,t=>{null==o||o(t),c?v(t):e.next(t)},void 0,()=>{for(d--;l.length&&dp(t))):p(t)}b()}))};return t.subscribe(new s.a(e,v,void 
0,()=>{h=!0,b()})),()=>{l=null,null==a||a()}}(e,r,t,n)))}},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(13);function i(t){setTimeout(()=>{const{onUnhandledError:e}=r.a;if(!e)throw t;e(t)})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(29);const{isArray:i}=Array;function o(t){return Object(r.a)(e=>function(t,e){return i(e)?t(...e):t(e)}(t,e))}},function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));const r=t=>t&&"number"==typeof t.length&&"function"!=typeof t},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(5);function i(t,e){return new r.a(n=>{let r=0;return e.schedule((function(){r===t.length?n.complete():(n.next(t[r++]),n.closed||this.schedule())}))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));const{isArray:r}=Array;function i(t){return 1===t.length&&r(t[0])?t[0]:t}},function(t,e,n){"use strict";n.d(e,"b",(function(){return i})),n.d(e,"a",(function(){return o}));var r=n(32);const i=new(n(34).a)(r.a),o=i},function(t,e,n){"use strict";n.d(e,"a",(function(){return u}));var r=n(5),i=n(7),o=n(26);const s=Object(o.a)(t=>function(){t(this),this.name="ObjectUnsubscribedError",this.message="object unsubscribed"});var c=n(11);class u extends r.a{constructor(){super(),this.observers=[],this.closed=!1,this.isStopped=!1,this.hasError=!1,this.thrownError=null}lift(t){const e=new a(this,this);return e.operator=t,e}_throwIfClosed(){if(this.closed)throw new s}next(t){if(this._throwIfClosed(),!this.isStopped){const e=this.observers.slice();for(const n of e)n.next(t)}}error(t){if(this._throwIfClosed(),!this.isStopped){this.hasError=this.isStopped=!0,this.thrownError=t;const{observers:e}=this;for(;e.length;)e.shift().error(t)}}complete(){if(this._throwIfClosed(),!this.isStopped){this.isStopped=!0;const{observers:t}=this;for(;t.length;)t.shift().complete()}}unsubscribe(){this.isStopped=this.closed=!0,this.observers=null}_trySubscribe(t){return this._throwIfClosed(),super._trySubscribe(t)}_subscribe(t){return this._throwIfClosed(),this._checkFinalizedStatuses(t),this._innerSubscribe(t)}_innerSubscribe(t){const{hasError:e,isStopped:n,observers:r}=this;return e||n?i.a:(r.push(t),new i.b(()=>Object(c.a)(this.observers,t)))}_checkFinalizedStatuses(t){const{hasError:e,thrownError:n,isStopped:r}=this;e?t.error(n):r&&t.complete()}asObservable(){const t=new r.a;return t.source=this,t}}u.create=(t,e)=>new a(t,e);class a extends u{constructor(t,e){super(),this.destination=t,this.source=e}next(t){var e,n;null===(n=null===(e=this.destination)||void 0===e?void 0:e.next)||void 0===n||n.call(e,t)}error(t){var e,n;null===(n=null===(e=this.destination)||void 0===e?void 0:e.error)||void 0===n||n.call(e,t)}complete(){var t,e;null===(e=null===(t=this.destination)||void 0===t?void 0:t.complete)||void 0===e||e.call(t)}_subscribe(t){var e,n;return null!==(n=null===(e=this.source)||void 0===e?void 0:e.subscribe(t))&&void 0!==n?n:i.a}}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(31);var i=n(15),o=n(8);function s(...t){return Object(r.a)(1)(Object(i.a)(t,Object(o.c)(t)))}},function(t,e,n){"use strict";function r(t){const e=t(t=>{Error.call(t),t.stack=(new Error).stack});return e.prototype=Object.create(Error.prototype),e.prototype.constructor=e,e}n.d(e,"a",(function(){return r}))},function(t,e,n){"use strict";n.d(e,"a",(function(){return i})),n.d(e,"b",(function(){return o}));var r=n(9);function i(...t){return o(t)}function o(t){return 0===t.length?r.a:1===t.length?t[0]:function(e){return 
t.reduce((t,e)=>e(t),e)}}},function(t,e,n){"use strict";n.d(e,"a",(function(){return r}));const r={now:()=>(r.delegate||Date).now(),delegate:void 0}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(0),i=n(1);function o(t,e){return Object(r.b)((n,r)=>{let o=0;n.subscribe(new i.a(r,n=>{r.next(t.call(e,n,o++))}))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(2);function i(t){return t&&Object(r.a)(t.schedule)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(17),i=n(9);function o(t=1/0){return Object(r.a)(i.a,t)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return c}));var r=n(7);class i extends r.b{constructor(t,e){super()}schedule(t,e=0){return this}}const o={setInterval(...t){const{delegate:e}=o;return((null==e?void 0:e.setInterval)||setInterval)(...t)},clearInterval(t){const{delegate:e}=o;return((null==e?void 0:e.clearInterval)||clearInterval)(t)},delegate:void 0};var s=n(11);class c extends i{constructor(t,e){super(t,e),this.scheduler=t,this.work=e,this.pending=!1}schedule(t,e=0){if(this.closed)return this;this.state=t;const n=this.id,r=this.scheduler;return null!=n&&(this.id=this.recycleAsyncId(r,n,e)),this.pending=!0,this.delay=e,this.id=this.id||this.requestAsyncId(r,this.id,e),this}requestAsyncId(t,e,n=0){return o.setInterval(t.flush.bind(t,this),n)}recycleAsyncId(t,e,n=0){if(null!=n&&this.delay===n&&!1===this.pending)return e;o.clearInterval(e)}execute(t,e){if(this.closed)return new Error("executing a cancelled action");this.pending=!1;const n=this._execute(t,e);if(n)return n;!1===this.pending&&null!=this.id&&(this.id=this.recycleAsyncId(this.scheduler,this.id,null))}_execute(t,e){let n,r=!1;try{this.work(t)}catch(t){r=!0,n=!!t&&t||new Error(t)}if(r)return this.unsubscribe(),n}unsubscribe(){if(!this.closed){const{id:t,scheduler:e}=this,{actions:n}=e;this.work=this.state=this.scheduler=null,this.pending=!1,Object(s.a)(n,this),null!=t&&(this.id=this.recycleAsyncId(e,t,null)),this.delay=null,super.unsubscribe()}}}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(5),i=n(23),o=n(30);function s(t=0,e,n=i.a){let s=-1;return null!=e&&(Object(o.a)(e)?n=e:s=e),new r.a(e=>{let r=(i=t)instanceof Date&&!isNaN(i)?+t-n.now():t;var i;r<0&&(r=0);let o=0;return n.schedule((function(){e.closed||(e.next(o++),0<=s?this.schedule(void 0,s):e.complete())}),r)})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(28);class i{constructor(t,e=i.now){this.schedulerActionCtor=t,this.now=e}schedule(t,e=0,n){return new this.schedulerActionCtor(this,t).schedule(n,e)}}i.now=r.a.now;class o extends i{constructor(t,e=i.now){super(t,e),this.actions=[],this.active=!1,this.scheduled=void 0}flush(t){const{actions:e}=this;if(this.active)return void e.push(t);let n;this.active=!0;do{if(n=t.execute(t.state,t.delay))break}while(t=e.shift());if(this.active=!1,n){for(;t=e.shift();)t.unsubscribe();throw n}}}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(6),i=n(0),o=n(1);function s(t,e){return Object(i.b)((n,i)=>{let s=null,c=0,u=!1;const a=()=>u&&!s&&i.complete();n.subscribe(new o.a(i,n=>{null==s||s.unsubscribe();let u=0;const l=c++;Object(r.c)(t(n,l)).subscribe(s=new o.a(i,t=>i.next(e?e(n,t,l,u++):t),void 0,()=>{s=null,a()}))},void 0,()=>{u=!0,a()}))})}},function(t,e,n){ -/*! 
- * clipboard.js v2.0.6 - * https://clipboardjs.com/ - * - * Licensed MIT © Zeno Rocha - */ -var r;r=function(){return function(t){var e={};function n(r){if(e[r])return e[r].exports;var i=e[r]={i:r,l:!1,exports:{}};return t[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=t,n.c=e,n.d=function(t,e,r){n.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:r})},n.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},n.t=function(t,e){if(1&e&&(t=n(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var i in t)n.d(r,i,function(e){return t[e]}.bind(null,i));return r},n.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return n.d(e,"a",e),e},n.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},n.p="",n(n.s=6)}([function(t,e){t.exports=function(t){var e;if("SELECT"===t.nodeName)t.focus(),e=t.value;else if("INPUT"===t.nodeName||"TEXTAREA"===t.nodeName){var n=t.hasAttribute("readonly");n||t.setAttribute("readonly",""),t.select(),t.setSelectionRange(0,t.value.length),n||t.removeAttribute("readonly"),e=t.value}else{t.hasAttribute("contenteditable")&&t.focus();var r=window.getSelection(),i=document.createRange();i.selectNodeContents(t),r.removeAllRanges(),r.addRange(i),e=r.toString()}return e}},function(t,e){function n(){}n.prototype={on:function(t,e,n){var r=this.e||(this.e={});return(r[t]||(r[t]=[])).push({fn:e,ctx:n}),this},once:function(t,e,n){var r=this;function i(){r.off(t,i),e.apply(n,arguments)}return i._=e,this.on(t,i,n)},emit:function(t){for(var e=[].slice.call(arguments,1),n=((this.e||(this.e={}))[t]||[]).slice(),r=0,i=n.length;r0&&void 0!==arguments[0]?arguments[0]:{};this.action=t.action,this.container=t.container,this.emitter=t.emitter,this.target=t.target,this.text=t.text,this.trigger=t.trigger,this.selectedText=""}},{key:"initSelection",value:function(){this.text?this.selectFake():this.target&&this.selectTarget()}},{key:"selectFake",value:function(){var t=this,e="rtl"==document.documentElement.getAttribute("dir");this.removeFake(),this.fakeHandlerCallback=function(){return t.removeFake()},this.fakeHandler=this.container.addEventListener("click",this.fakeHandlerCallback)||!0,this.fakeElem=document.createElement("textarea"),this.fakeElem.style.fontSize="12pt",this.fakeElem.style.border="0",this.fakeElem.style.padding="0",this.fakeElem.style.margin="0",this.fakeElem.style.position="absolute",this.fakeElem.style[e?"right":"left"]="-9999px";var n=window.pageYOffset||document.documentElement.scrollTop;this.fakeElem.style.top=n+"px",this.fakeElem.setAttribute("readonly",""),this.fakeElem.value=this.text,this.container.appendChild(this.fakeElem),this.selectedText=i()(this.fakeElem),this.copyText()}},{key:"removeFake",value:function(){this.fakeHandler&&(this.container.removeEventListener("click",this.fakeHandlerCallback),this.fakeHandler=null,this.fakeHandlerCallback=null),this.fakeElem&&(this.container.removeChild(this.fakeElem),this.fakeElem=null)}},{key:"selectTarget",value:function(){this.selectedText=i()(this.target),this.copyText()}},{key:"copyText",value:function(){var t=void 
0;try{t=document.execCommand(this.action)}catch(e){t=!1}this.handleResult(t)}},{key:"handleResult",value:function(t){this.emitter.emit(t?"success":"error",{action:this.action,text:this.selectedText,trigger:this.trigger,clearSelection:this.clearSelection.bind(this)})}},{key:"clearSelection",value:function(){this.trigger&&this.trigger.focus(),document.activeElement.blur(),window.getSelection().removeAllRanges()}},{key:"destroy",value:function(){this.removeFake()}},{key:"action",set:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"copy";if(this._action=t,"copy"!==this._action&&"cut"!==this._action)throw new Error('Invalid "action" value, use either "copy" or "cut"')},get:function(){return this._action}},{key:"target",set:function(t){if(void 0!==t){if(!t||"object"!==(void 0===t?"undefined":o(t))||1!==t.nodeType)throw new Error('Invalid "target" value, use a valid Element');if("copy"===this.action&&t.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if("cut"===this.action&&(t.hasAttribute("readonly")||t.hasAttribute("disabled")))throw new Error('Invalid "target" attribute. You can\'t cut text from elements with "readonly" or "disabled" attributes');this._target=t}},get:function(){return this._target}}]),t}(),u=n(1),a=n.n(u),l=n(2),d=n.n(l),f="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},h=function(){function t(t,e){for(var n=0;n0&&void 0!==arguments[0]?arguments[0]:{};this.action="function"==typeof t.action?t.action:this.defaultAction,this.target="function"==typeof t.target?t.target:this.defaultTarget,this.text="function"==typeof t.text?t.text:this.defaultText,this.container="object"===f(t.container)?t.container:document.body}},{key:"listenClick",value:function(t){var e=this;this.listener=d()(t,"click",(function(t){return e.onClick(t)}))}},{key:"onClick",value:function(t){var e=t.delegateTarget||t.currentTarget;this.clipboardAction&&(this.clipboardAction=null),this.clipboardAction=new c({action:this.action(e),target:this.target(e),text:this.text(e),container:this.container,trigger:e,emitter:this})}},{key:"defaultAction",value:function(t){return v("action",t)}},{key:"defaultTarget",value:function(t){var e=v("target",t);if(e)return document.querySelector(e)}},{key:"defaultText",value:function(t){return v("text",t)}},{key:"destroy",value:function(){this.listener.destroy(),this.clipboardAction&&(this.clipboardAction.destroy(),this.clipboardAction=null)}}],[{key:"isSupported",value:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:["copy","cut"],e="string"==typeof t?[t]:t,n=!!document.queryCommandSupported;return e.forEach((function(t){n=n&&!!document.queryCommandSupported(t)})),n}}]),e}(a.a);function v(t,e){var n="data-clipboard-"+t;if(e.hasAttribute(n))return e.getAttribute(n)}e.default=b}]).default},t.exports=r()},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(24),i=n(28);class o extends 
r.a{constructor(t=1/0,e=1/0,n=i.a){super(),this.bufferSize=t,this.windowTime=e,this.timestampProvider=n,this.buffer=[],this.infiniteTimeWindow=!0,this.infiniteTimeWindow=e===1/0,this.bufferSize=Math.max(1,t),this.windowTime=Math.max(1,e)}next(t){const{isStopped:e,buffer:n,infiniteTimeWindow:r,timestampProvider:i,windowTime:o}=this;e||(n.push(t),!r&&n.push(i.now()+o)),this.trimBuffer(),super.next(t)}_subscribe(t){this._throwIfClosed(),this.trimBuffer();const e=this._innerSubscribe(t),{infiniteTimeWindow:n,buffer:r}=this,i=r.slice();for(let e=0;e0},t.prototype.connect_=function(){r&&!this.connected_&&(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),c?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},t.prototype.disconnect_=function(){r&&this.connected_&&(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},t.prototype.onTransitionEnd_=function(t){var e=t.propertyName,n=void 0===e?"":e;s.some((function(t){return!!~n.indexOf(t)}))&&this.refresh()},t.getInstance=function(){return this.instance_||(this.instance_=new t),this.instance_},t.instance_=null,t}(),a=function(t,e){for(var n=0,r=Object.keys(e);n0},t}(),_="undefined"!=typeof WeakMap?new WeakMap:new n,O=function t(e){if(!(this instanceof t))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var n=u.getInstance(),r=new g(e,n,this);_.set(this,r)};["observe","unobserve","disconnect"].forEach((function(t){O.prototype[t]=function(){var e;return(e=_.get(this))[t].apply(e,arguments)}}));var j=void 0!==i.ResizeObserver?i.ResizeObserver:O;e.a=j}).call(this,n(46))},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(5),i=n(6);function o(t){return new r.a(e=>{Object(i.c)(t()).subscribe(e)})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(0),i=n(1);function o(t,e){return Object(r.b)((n,r)=>{let o=0;n.subscribe(new i.a(r,n=>t.call(e,n,o++)&&r.next(n)))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(12),i=n(0),o=n(1);function s(t){return t<=0?()=>r.a:Object(i.b)((e,n)=>{let r=0;e.subscribe(new o.a(n,e=>{++r<=t&&(n.next(e),t<=r&&n.complete())}))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(0),i=n(1);function o(t,e){return t=null!=t?t:s,Object(r.b)((n,r)=>{let o,s=!0;n.subscribe(new i.a(r,n=>{(s&&(o=n,1)||!t(o,o=e?e(n):n))&&r.next(n),s=!1}))})}function s(t,e){return t===e}},function(t,e,n){"use strict"; -/*! 
- * escape-html - * Copyright(c) 2012-2013 TJ Holowaychuk - * Copyright(c) 2015 Andreas Lubbe - * Copyright(c) 2015 Tiancheng "Timothy" Gu - * MIT Licensed - */var r=/["'&<>]/;t.exports=function(t){var e,n=""+t,i=r.exec(n);if(!i)return n;var o="",s=0,c=0;for(s=i.index;sn[t]),keys:t}}}var e;return{args:t,keys:null}}var a=n(16),l=n(6),d=n(9),f=n(19),h=n(8);function b(...t){const e=Object(h.c)(t),n=Object(h.b)(t),{args:i,keys:o}=u(t),s=new r.a(p(i,e,o?t=>{const e={};for(let n=0;n{m(e,()=>{const{length:i}=t,o=new Array(i);let s=i;const c=t.map(()=>!1);let u=!0;for(let a=0;a{Object(l.a)(t[a],e).subscribe(new v(r,t=>{o[a]=t,u&&(c[a]=!0,u=!c.every(d.a)),u||r.next(n(o.slice()))},()=>0==--s))},r)}},r)}}function m(t,e,n){t?n.add(t.schedule(e)):e()}},function(t,e,n){!function(){"use strict";function t(t){var e=!0,n=!1,r=null,i={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function o(t){return!!(t&&t!==document&&"HTML"!==t.nodeName&&"BODY"!==t.nodeName&&"classList"in t&&"contains"in t.classList)}function s(t){t.classList.contains("focus-visible")||(t.classList.add("focus-visible"),t.setAttribute("data-focus-visible-added",""))}function c(t){e=!1}function u(){document.addEventListener("mousemove",a),document.addEventListener("mousedown",a),document.addEventListener("mouseup",a),document.addEventListener("pointermove",a),document.addEventListener("pointerdown",a),document.addEventListener("pointerup",a),document.addEventListener("touchmove",a),document.addEventListener("touchstart",a),document.addEventListener("touchend",a)}function a(t){t.target.nodeName&&"html"===t.target.nodeName.toLowerCase()||(e=!1,document.removeEventListener("mousemove",a),document.removeEventListener("mousedown",a),document.removeEventListener("mouseup",a),document.removeEventListener("pointermove",a),document.removeEventListener("pointerdown",a),document.removeEventListener("pointerup",a),document.removeEventListener("touchmove",a),document.removeEventListener("touchstart",a),document.removeEventListener("touchend",a))}document.addEventListener("keydown",(function(n){n.metaKey||n.altKey||n.ctrlKey||(o(t.activeElement)&&s(t.activeElement),e=!0)}),!0),document.addEventListener("mousedown",c,!0),document.addEventListener("pointerdown",c,!0),document.addEventListener("touchstart",c,!0),document.addEventListener("visibilitychange",(function(t){"hidden"===document.visibilityState&&(n&&(e=!0),u())}),!0),u(),t.addEventListener("focus",(function(t){var n,r,c;o(t.target)&&(e||(n=t.target,r=n.type,"INPUT"===(c=n.tagName)&&i[r]&&!n.readOnly||"TEXTAREA"===c&&!n.readOnly||n.isContentEditable))&&s(t.target)}),!0),t.addEventListener("blur",(function(t){var e;o(t.target)&&(t.target.classList.contains("focus-visible")||t.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(r),r=window.setTimeout((function(){n=!1}),100),(e=t.target).hasAttribute("data-focus-visible-added")&&(e.classList.remove("focus-visible"),e.removeAttribute("data-focus-visible-added")))}),!0),t.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&t.host?t.host.setAttribute("data-js-focus-visible",""):t.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if("undefined"!=typeof window&&"undefined"!=typeof document){var e;window.applyFocusVisiblePolyfill=t;try{e=new 
CustomEvent("focus-visible-polyfill-ready")}catch(t){(e=document.createEvent("CustomEvent")).initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(e)}"undefined"!=typeof document&&t(document)}()},function(t,e){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(t){"object"==typeof window&&(n=window)}t.exports=n},,,,,function(t,e,n){"use strict";n.d(e,"a",(function(){return f}));var r=n(5),i=n(17),o=n(20),s=n(2),c=n(19),u=n(15);const a=["addListener","removeListener"],l=["addEventListener","removeEventListener"],d=["on","off"];function f(t,e,n,b){if(Object(s.a)(n)&&(b=n,n=void 0),b)return f(t,e,n).pipe(Object(c.a)(b));const[v,p]=function(t){return Object(s.a)(t.addEventListener)&&Object(s.a)(t.removeEventListener)}(t)?l.map(r=>i=>t[r](e,i,n)):function(t){return Object(s.a)(t.addListener)&&Object(s.a)(t.removeListener)}(t)?a.map(h(t,e)):function(t){return Object(s.a)(t.on)&&Object(s.a)(t.off)}(t)?d.map(h(t,e)):[];return!v&&Object(o.a)(t)?Object(i.a)(t=>f(t,e,n))(Object(u.a)(t)):new r.a(t=>{if(!v)throw new TypeError("Invalid event target");const e=(...e)=>t.next(1p(e)})}function h(t,e){return n=>r=>t[n](e,r)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(0),i=n(1);function o(t){return Object(r.b)((e,n)=>{e.subscribe(new i.a(n,()=>n.next(t)))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return a}));var r=n(31),i=n(15),o=n(22),s=n(6),c=n(12),u=n(8);function a(...t){const e=Object(u.c)(t),n=Object(u.a)(t,1/0);return(t=Object(o.a)(t)).length?1===t.length?Object(s.c)(t[0]):Object(r.a)(n)(Object(i.a)(t,e)):c.a}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(25),i=n(8),o=n(0);function s(...t){const e=Object(i.c)(t);return Object(o.b)((n,i)=>{(e?Object(r.a)(t,n,e):Object(r.a)(t,n)).subscribe(i)})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(15),i=n(21),o=n(8);function s(...t){const e=Object(o.c)(t);return e?Object(i.a)(t,e):Object(r.a)(t)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(5),i=n(10);const o=new r.a(i.a)},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(0);function i(t){return Object(r.b)((e,n)=>{e.subscribe(n),n.add(t)})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(37),i=n(0);function o(t,e,n){let o;return o=t&&"object"==typeof t?t:{bufferSize:t,windowTime:e,refCount:!1,scheduler:n},Object(i.b)(function({bufferSize:t=1/0,windowTime:e=1/0,refCount:n,scheduler:i}){let o,s,c=0;return(u,a)=>{let l;c++,o?l=o.subscribe(a):(o=new r.a(t,e,i),l=o.subscribe(a),s=u.subscribe({next(t){o.next(t)},error(t){const e=o;s=void 0,o=void 0,e.error(t)},complete(){s=void 0,o.complete()}}),s.closed&&(s=void 0)),a.add(()=>{c--,l.unsubscribe(),n&&0===c&&s&&(s.unsubscribe(),s=void 0,o=void 0)})}}(o))}},function(t,e,n){"use strict";n.d(e,"a",(function(){return c}));var r=n(2),i=n(0),o=n(1),s=n(9);function c(t,e,n){const c=Object(r.a)(t)||e||n?{next:t,error:e,complete:n}:t;return c?Object(i.b)((t,e)=>{t.subscribe(new o.a(e,t=>{var n;null===(n=c.next)||void 0===n||n.call(c,t),e.next(t)},t=>{var n;null===(n=c.error)||void 0===n||n.call(c,t),e.error(t)},()=>{var t;null===(t=c.complete)||void 0===t||t.call(c),e.complete()}))}):s.a}},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(24);class i extends r.a{constructor(t){super(),this._value=t}get value(){return this.getValue()}_subscribe(t){const 
e=super._subscribe(t);return!e.closed&&t.next(this._value),e}getValue(){const{hasError:t,thrownError:e,_value:n}=this;if(t)throw e;return this._throwIfClosed(),n}next(t){super.next(this._value=t)}}},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(42);function i(t,e){return Object(r.a)((n,r)=>e?e(n[t],r[t]):n[t]===r[t])}},function(t,e,n){"use strict";n.d(e,"a",(function(){return c}));var r=n(0),i=n(1),o=n(6);const s={leading:!0,trailing:!1};function c(t,{leading:e,trailing:n}=s){return Object(r.b)((r,s)=>{let c=!1,u=null,a=null,l=!1;const d=()=>{null==a||a.unsubscribe(),a=null,n&&(h(),l&&s.complete())},f=e=>a=Object(o.c)(t(e)).subscribe(new i.a(s,d,void 0,d)),h=()=>{c&&(s.next(u),!l&&f(u)),c=!1,u=null};r.subscribe(new i.a(s,t=>{c=!0,u=t,(!a||a.closed)&&(e?h():f(t))},void 0,()=>{l=!0,(!(n&&c&&a)||a.closed)&&s.complete()}))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(35);function i(t,e){return e?Object(r.a)(()=>t,e):Object(r.a)(()=>t)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(0),i=n(1);function o(t,e=0){return Object(r.b)((n,r)=>{n.subscribe(new i.a(r,n=>r.add(t.schedule(()=>r.next(n),e)),n=>r.add(t.schedule(()=>r.error(n),e)),()=>r.add(t.schedule(()=>r.complete(),e))))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return a}));var r=n(44),i=n(0),o=n(22),s=n(19),c=n(27),u=n(8);function a(...t){return function t(...e){const n=Object(u.b)(e);return n?Object(c.a)(t(...e),Object(s.a)(n)):Object(i.b)((t,n)=>{Object(r.b)([t,...Object(o.a)(e)])(n)})}(...t)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return a}));var r=n(0),i=n(1),o=n(6),s=n(9),c=n(10),u=n(8);function a(...t){const e=Object(u.b)(t);return Object(r.b)((n,r)=>{const u=t.length,a=new Array(u);let l=t.map(()=>!1),d=!1;n.subscribe(new i.a(r,t=>{if(d){const n=[t,...a];r.next(e?e(...n):n)}}));for(let e=0;e{a[e]=t,d||l[e]||(l[e]=!0,(d=l.every(s.a))&&(l=null))},void 0,c.a))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(0),i=n(1);function o(t){return Object(r.b)((e,n)=>{let r=!1,o=null;e.subscribe(new i.a(n,t=>{r=!0,o=t}));const s=()=>{if(r){r=!1;const t=o;o=null,n.next(t)}};t.subscribe(new i.a(n,s,void 0,s))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return i}));var r=n(40);function i(t){return Object(r.a)((e,n)=>t<=n)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(6),i=n(1),o=n(0);function s(t){return Object(o.b)((e,n)=>{let o,c=null,u=!1;c=e.subscribe(new i.a(n,void 0,i=>{o=Object(r.c)(t(i,s(t)(e))),c?(c.unsubscribe(),c=null,o.subscribe(n)):u=!0})),u&&(c.unsubscribe(),c=null,o.subscribe(n))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(0),i=n(1),o=n(11);function s(t,e=null){return e=null!=e?e:t,Object(r.b)((n,r)=>{let s=[],c=0;n.subscribe(new i.a(r,n=>{let i=null;c++%e==0&&s.push([]);for(const e of s)e.push(n),t<=e.length&&(i=null!=i?i:[],i.push(e));if(i)for(const t of i)Object(o.a)(s,t),r.next(t)},void 0,()=>{for(const t of s)r.next(t);r.complete()},()=>{s=null}))})}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(17),i=n(2);function o(t,e){return Object(i.a)(e)?Object(r.a)(t,e,1):Object(r.a)(t,1)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return o}));var r=n(39),i=n(12);function o(t,e=i.a,n=i.a){return Object(r.a)(()=>t()?e:n)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return f}));var r=n(5),i=n(7),o=n(0),s=n(1);function c(){return Object(o.b)((t,e)=>{let n=null;t._refCount++;const r=new s.a(e,void 
0,void 0,void 0,()=>{if(!t||t._refCount<=0||0<--t._refCount)return void(n=null);const r=t._connection,i=n;n=null,!r||i&&r!==i||r.unsubscribe(),e.unsubscribe()});t.subscribe(r),r.closed||(n=t.connect())})}class u extends r.a{constructor(t,e){super(),this.source=t,this.subjectFactory=e,this._subject=null,this._refCount=0,this._connection=null}_subscribe(t){return this.getSubject().subscribe(t)}getSubject(){const t=this._subject;return t&&!t.isStopped||(this._subject=this.subjectFactory()),this._subject}_teardown(){this._refCount=0;const{_connection:t}=this;this._subject=this._connection=null,null==t||t.unsubscribe()}connect(){let t=this._connection;if(!t){t=this._connection=new i.b;const e=this.getSubject();t.add(this.source.subscribe(new s.a(e,void 0,t=>{this._teardown(),e.error(t)},()=>{this._teardown(),e.complete()},()=>this._teardown()))),t.closed&&(this._connection=null,t=i.b.EMPTY)}return t}refCount(){return c()(this)}}var a=n(2);var l=n(24);function d(){return new l.a}function f(){return t=>c()(function(t,e){const n=Object(a.a)(t)?t:()=>t;return Object(a.a)(e)?Object(o.b)((t,r)=>{const i=n();e(i).subscribe(r).add(t.subscribe(i))}):t=>{const e=new u(t,n);return Object(o.a)(t)&&(e.lift=t.lift),e.source=t,e.subjectFactory=n,e}}(d)(t))}},function(t,e,n){"use strict";n.d(e,"a",(function(){return a}));var r=n(32),i=n(7);const o={schedule(t){let e=requestAnimationFrame,n=cancelAnimationFrame;const{delegate:r}=o;r&&(e=r.requestAnimationFrame,n=r.cancelAnimationFrame);const s=e(e=>{n=void 0,t(e)});return new i.b(()=>null==n?void 0:n(s))},requestAnimationFrame(...t){const{delegate:e}=o;return((null==e?void 0:e.requestAnimationFrame)||requestAnimationFrame)(...t)},cancelAnimationFrame(...t){const{delegate:e}=o;return((null==e?void 0:e.cancelAnimationFrame)||cancelAnimationFrame)(...t)},delegate:void 0};class s extends r.a{constructor(t,e){super(t,e),this.scheduler=t,this.work=e}requestAsyncId(t,e,n=0){return null!==n&&n>0?super.requestAsyncId(t,e,n):(t.actions.push(this),t.scheduled||(t.scheduled=o.requestAnimationFrame(()=>t.flush(void 0))))}recycleAsyncId(t,e,n=0){if(null!=n&&n>0||null==n&&this.delay>0)return super.recycleAsyncId(t,e,n);0===t.actions.length&&(o.cancelAnimationFrame(e),t.scheduled=void 0)}}var c=n(34);class u extends c.a{flush(t){this.active=!0,this.scheduled=void 0;const{actions:e}=this;let n,r=-1;t=t||e.shift();const i=e.length;do{if(n=t.execute(t.state,t.delay))break}while(++rObject(s.a)(e.pipe(Object(c.a)(1),Object(i.b)((t,e)=>{t.subscribe(new o.a(e,u.a))})),n.pipe(a(t))):Object(i.b)((e,n)=>{let r=0,i=!1,s=0;const c=()=>i&&!s&&n.complete();e.subscribe(new o.a(n,e=>{let i=!1;const u=()=>{n.next(e),null==a||a.unsubscribe(),i||(s--,i=!0,c())},a=new o.a(n,u,void 0,u);s++,t(e,r++).subscribe(a)},void 0,()=>{i=!0,c()}))})}var l=n(33);function d(t,e=r.b){const n=Object(l.a)(t,e);return a(()=>n)}},function(t,e,n){"use strict";n.d(e,"a",(function(){return s}));var r=n(0),i=n(1);function o(t,e,n,r,o){return(s,c)=>{let u=n,a=e,l=0;s.subscribe(new i.a(c,e=>{const n=l++;a=u?t(a,e,n):(u=!0,e),r&&c.next(a)},void 0,o&&(()=>{u&&c.next(a),c.complete()})))}}function s(t,e){return Object(r.b)(o(t,e,arguments.length>=2,!0))}},function(t,e,n){"use strict";n.d(e,"a",(function(){return u}));var r=n(23),i=n(0),o=n(1),s=n(6);var c=n(33);function u(t,e=r.b){const n=Object(c.a)(t,e);return u=()=>n,Object(i.b)((t,e)=>{let n=!1,r=null,i=null;const c=()=>{if(null==i||i.unsubscribe(),i=null,n){n=!1;const t=r;r=null,e.next(t)}};t.subscribe(new o.a(e,t=>{null==i||i.unsubscribe(),n=!0,r=t,i=new o.a(e,c,void 
0,c),Object(s.c)(u(t)).subscribe(i)},void 0,()=>{c(),e.complete()},()=>{r=i=null}))});var u}},function(t,e,n){"use strict";n.d(e,"a",(function(){return d}));var r=n(5),i=n(6),o=n(22),s=n(12),c=n(1),u=n(8);var a=n(0);function l(...t){return Object(a.b)((e,n)=>{(function(...t){const e=Object(u.b)(t);return(t=Object(o.a)(t)).length?new r.a(n=>{let r=t.map(()=>[]),o=t.map(()=>!1);n.add(()=>{r=o=null});for(let s=0;!n.closed&&s{if(r[s].push(t),r.every(t=>t.length)){const t=r.map(t=>t.shift());n.next(e?e(...t):t),r.some((t,e)=>!t.length&&o[e])&&n.complete()}},void 0,()=>{o[s]=!0,!r[s].length&&n.complete()}));return()=>{r=o=null}}):s.a})(e,...t).subscribe(n)})}function d(...t){return l(...t)}}]]); \ No newline at end of file diff --git a/0.2/assets/javascripts/worker/search.0f64ce30.min.js b/0.2/assets/javascripts/worker/search.0f64ce30.min.js deleted file mode 100644 index 99676d0b..00000000 --- a/0.2/assets/javascripts/worker/search.0f64ce30.min.js +++ /dev/null @@ -1,58 +0,0 @@ -!function(e){var t={};function r(n){if(t[n])return t[n].exports;var i=t[n]={i:n,l:!1,exports:{}};return e[n].call(i.exports,i,i.exports,r),i.l=!0,i.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)r.d(n,i,function(t){return e[t]}.bind(null,i));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=5)}([function(e,t,r){"use strict"; -/*! - * escape-html - * Copyright(c) 2012-2013 TJ Holowaychuk - * Copyright(c) 2015 Andreas Lubbe - * Copyright(c) 2015 Tiancheng "Timothy" Gu - * MIT Licensed - */var n=/["'&<>]/;e.exports=function(e){var t,r=""+e,i=n.exec(r);if(!i)return r;var s="",o=0,a=0;for(o=i.index;o0){var u=I.utils.clone(t)||{};u.position=[o,a],u.index=i.length,i.push(new I.Token(r.slice(o,s),u))}o=s+1}}return i},I.tokenizer.separator=/[\s\-]+/ -/*! - * lunr.Pipeline - * Copyright (C) 2020 Oliver Nightingale - */,I.Pipeline=function(){this._stack=[]},I.Pipeline.registeredFunctions=Object.create(null),I.Pipeline.registerFunction=function(e,t){t in this.registeredFunctions&&I.utils.warn("Overwriting existing registered function: "+t),e.label=t,I.Pipeline.registeredFunctions[e.label]=e},I.Pipeline.warnIfFunctionNotRegistered=function(e){e.label&&e.label in this.registeredFunctions||I.utils.warn("Function is not registered with pipeline. 
This may cause problems when serialising the index.\n",e)},I.Pipeline.load=function(e){var t=new I.Pipeline;return e.forEach((function(e){var r=I.Pipeline.registeredFunctions[e];if(!r)throw new Error("Cannot load unregistered function: "+e);t.add(r)})),t},I.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach((function(e){I.Pipeline.warnIfFunctionNotRegistered(e),this._stack.push(e)}),this)},I.Pipeline.prototype.after=function(e,t){I.Pipeline.warnIfFunctionNotRegistered(t);var r=this._stack.indexOf(e);if(-1==r)throw new Error("Cannot find existingFn");r+=1,this._stack.splice(r,0,t)},I.Pipeline.prototype.before=function(e,t){I.Pipeline.warnIfFunctionNotRegistered(t);var r=this._stack.indexOf(e);if(-1==r)throw new Error("Cannot find existingFn");this._stack.splice(r,0,t)},I.Pipeline.prototype.remove=function(e){var t=this._stack.indexOf(e);-1!=t&&this._stack.splice(t,1)},I.Pipeline.prototype.run=function(e){for(var t=this._stack.length,r=0;r1&&(se&&(r=i),s!=e);)n=r-t,i=t+Math.floor(n/2),s=this.elements[2*i];return s==e||s>e?2*i:sa?l+=2:o==a&&(t+=r[u+1]*n[l+1],u+=2,l+=2);return t},I.Vector.prototype.similarity=function(e){return this.dot(e)/this.magnitude()||0},I.Vector.prototype.toArray=function(){for(var e=new Array(this.elements.length/2),t=1,r=0;t0){var s,o=i.str.charAt(0);o in i.node.edges?s=i.node.edges[o]:(s=new I.TokenSet,i.node.edges[o]=s),1==i.str.length&&(s.final=!0),n.push({node:s,editsRemaining:i.editsRemaining,str:i.str.slice(1)})}if(0!=i.editsRemaining){if("*"in i.node.edges)var a=i.node.edges["*"];else{a=new I.TokenSet;i.node.edges["*"]=a}if(0==i.str.length&&(a.final=!0),n.push({node:a,editsRemaining:i.editsRemaining-1,str:i.str}),i.str.length>1&&n.push({node:i.node,editsRemaining:i.editsRemaining-1,str:i.str.slice(1)}),1==i.str.length&&(i.node.final=!0),i.str.length>=1){if("*"in i.node.edges)var u=i.node.edges["*"];else{u=new I.TokenSet;i.node.edges["*"]=u}1==i.str.length&&(u.final=!0),n.push({node:u,editsRemaining:i.editsRemaining-1,str:i.str.slice(1)})}if(i.str.length>1){var l,c=i.str.charAt(0),h=i.str.charAt(1);h in i.node.edges?l=i.node.edges[h]:(l=new I.TokenSet,i.node.edges[h]=l),1==i.str.length&&(l.final=!0),n.push({node:l,editsRemaining:i.editsRemaining-1,str:c+i.str.slice(2)})}}}return r},I.TokenSet.fromString=function(e){for(var t=new I.TokenSet,r=t,n=0,i=e.length;n=e;t--){var r=this.uncheckedNodes[t],n=r.child.toString();n in this.minimizedNodes?r.parent.edges[r.char]=this.minimizedNodes[n]:(r.child._str=n,this.minimizedNodes[n]=r.child),this.uncheckedNodes.pop()}} -/*! 
- * lunr.Index - * Copyright (C) 2020 Oliver Nightingale - */,I.Index=function(e){this.invertedIndex=e.invertedIndex,this.fieldVectors=e.fieldVectors,this.tokenSet=e.tokenSet,this.fields=e.fields,this.pipeline=e.pipeline},I.Index.prototype.search=function(e){return this.query((function(t){new I.QueryParser(e,t).parse()}))},I.Index.prototype.query=function(e){for(var t=new I.Query(this.fields),r=Object.create(null),n=Object.create(null),i=Object.create(null),s=Object.create(null),o=Object.create(null),a=0;a1?1:e},I.Builder.prototype.k1=function(e){this._k1=e},I.Builder.prototype.add=function(e,t){var r=e[this._ref],n=Object.keys(this._fields);this._documents[r]=t||{},this.documentCount+=1;for(var i=0;i=this.length)return I.QueryLexer.EOS;var e=this.str.charAt(this.pos);return this.pos+=1,e},I.QueryLexer.prototype.width=function(){return this.pos-this.start},I.QueryLexer.prototype.ignore=function(){this.start==this.pos&&(this.pos+=1),this.start=this.pos},I.QueryLexer.prototype.backup=function(){this.pos-=1},I.QueryLexer.prototype.acceptDigitRun=function(){var e,t;do{t=(e=this.next()).charCodeAt(0)}while(t>47&&t<58);e!=I.QueryLexer.EOS&&this.backup()},I.QueryLexer.prototype.more=function(){return this.pos1&&(e.backup(),e.emit(I.QueryLexer.TERM)),e.ignore(),e.more())return I.QueryLexer.lexText},I.QueryLexer.lexEditDistance=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(I.QueryLexer.EDIT_DISTANCE),I.QueryLexer.lexText},I.QueryLexer.lexBoost=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(I.QueryLexer.BOOST),I.QueryLexer.lexText},I.QueryLexer.lexEOS=function(e){e.width()>0&&e.emit(I.QueryLexer.TERM)},I.QueryLexer.termSeparator=I.tokenizer.separator,I.QueryLexer.lexText=function(e){for(;;){var t=e.next();if(t==I.QueryLexer.EOS)return I.QueryLexer.lexEOS;if(92!=t.charCodeAt(0)){if(":"==t)return I.QueryLexer.lexField;if("~"==t)return e.backup(),e.width()>0&&e.emit(I.QueryLexer.TERM),I.QueryLexer.lexEditDistance;if("^"==t)return e.backup(),e.width()>0&&e.emit(I.QueryLexer.TERM),I.QueryLexer.lexBoost;if("+"==t&&1===e.width())return e.emit(I.QueryLexer.PRESENCE),I.QueryLexer.lexText;if("-"==t&&1===e.width())return e.emit(I.QueryLexer.PRESENCE),I.QueryLexer.lexText;if(t.match(I.QueryLexer.termSeparator))return I.QueryLexer.lexTerm}else e.escapeCharacter()}},I.QueryParser=function(e,t){this.lexer=new I.QueryLexer(e),this.query=t,this.currentClause={},this.lexemeIdx=0},I.QueryParser.prototype.parse=function(){this.lexer.run(),this.lexemes=this.lexer.lexemes;for(var e=I.QueryParser.parseClause;e;)e=e(this);return this.query},I.QueryParser.prototype.peekLexeme=function(){return this.lexemes[this.lexemeIdx]},I.QueryParser.prototype.consumeLexeme=function(){var e=this.peekLexeme();return this.lexemeIdx+=1,e},I.QueryParser.prototype.nextClause=function(){var e=this.currentClause;this.query.clause(e),this.currentClause={}},I.QueryParser.parseClause=function(e){var t=e.peekLexeme();if(null!=t)switch(t.type){case I.QueryLexer.PRESENCE:return I.QueryParser.parsePresence;case I.QueryLexer.FIELD:return I.QueryParser.parseField;case I.QueryLexer.TERM:return I.QueryParser.parseTerm;default:var r="expected either a field or a term, found "+t.type;throw t.str.length>=1&&(r+=" with value '"+t.str+"'"),new I.QueryParseError(r,t.start,t.end)}},I.QueryParser.parsePresence=function(e){var t=e.consumeLexeme();if(null!=t){switch(t.str){case"-":e.currentClause.presence=I.Query.presence.PROHIBITED;break;case"+":e.currentClause.presence=I.Query.presence.REQUIRED;break;default:var r="unrecognised presence 
operator'"+t.str+"'";throw new I.QueryParseError(r,t.start,t.end)}var n=e.peekLexeme();if(null==n){r="expecting term or field, found nothing";throw new I.QueryParseError(r,t.start,t.end)}switch(n.type){case I.QueryLexer.FIELD:return I.QueryParser.parseField;case I.QueryLexer.TERM:return I.QueryParser.parseTerm;default:r="expecting term or field, found '"+n.type+"'";throw new I.QueryParseError(r,n.start,n.end)}}},I.QueryParser.parseField=function(e){var t=e.consumeLexeme();if(null!=t){if(-1==e.query.allFields.indexOf(t.str)){var r=e.query.allFields.map((function(e){return"'"+e+"'"})).join(", "),n="unrecognised field '"+t.str+"', possible fields: "+r;throw new I.QueryParseError(n,t.start,t.end)}e.currentClause.fields=[t.str];var i=e.peekLexeme();if(null==i){n="expecting term, found nothing";throw new I.QueryParseError(n,t.start,t.end)}switch(i.type){case I.QueryLexer.TERM:return I.QueryParser.parseTerm;default:n="expecting term, found '"+i.type+"'";throw new I.QueryParseError(n,i.start,i.end)}}},I.QueryParser.parseTerm=function(e){var t=e.consumeLexeme();if(null!=t){e.currentClause.term=t.str.toLowerCase(),-1!=t.str.indexOf("*")&&(e.currentClause.usePipeline=!1);var r=e.peekLexeme();if(null!=r)switch(r.type){case I.QueryLexer.TERM:return e.nextClause(),I.QueryParser.parseTerm;case I.QueryLexer.FIELD:return e.nextClause(),I.QueryParser.parseField;case I.QueryLexer.EDIT_DISTANCE:return I.QueryParser.parseEditDistance;case I.QueryLexer.BOOST:return I.QueryParser.parseBoost;case I.QueryLexer.PRESENCE:return e.nextClause(),I.QueryParser.parsePresence;default:var n="Unexpected lexeme type '"+r.type+"'";throw new I.QueryParseError(n,r.start,r.end)}else e.nextClause()}},I.QueryParser.parseEditDistance=function(e){var t=e.consumeLexeme();if(null!=t){var r=parseInt(t.str,10);if(isNaN(r)){var n="edit distance must be numeric";throw new I.QueryParseError(n,t.start,t.end)}e.currentClause.editDistance=r;var i=e.peekLexeme();if(null!=i)switch(i.type){case I.QueryLexer.TERM:return e.nextClause(),I.QueryParser.parseTerm;case I.QueryLexer.FIELD:return e.nextClause(),I.QueryParser.parseField;case I.QueryLexer.EDIT_DISTANCE:return I.QueryParser.parseEditDistance;case I.QueryLexer.BOOST:return I.QueryParser.parseBoost;case I.QueryLexer.PRESENCE:return e.nextClause(),I.QueryParser.parsePresence;default:n="Unexpected lexeme type '"+i.type+"'";throw new I.QueryParseError(n,i.start,i.end)}else e.nextClause()}},I.QueryParser.parseBoost=function(e){var t=e.consumeLexeme();if(null!=t){var r=parseInt(t.str,10);if(isNaN(r)){var n="boost must be numeric";throw new I.QueryParseError(n,t.start,t.end)}e.currentClause.boost=r;var i=e.peekLexeme();if(null!=i)switch(i.type){case I.QueryLexer.TERM:return e.nextClause(),I.QueryParser.parseTerm;case I.QueryLexer.FIELD:return e.nextClause(),I.QueryParser.parseField;case I.QueryLexer.EDIT_DISTANCE:return I.QueryParser.parseEditDistance;case I.QueryLexer.BOOST:return I.QueryParser.parseBoost;case I.QueryLexer.PRESENCE:return e.nextClause(),I.QueryParser.parsePresence;default:n="Unexpected lexeme type '"+i.type+"'";throw new I.QueryParseError(n,i.start,i.end)}else e.nextClause()}},void 0===(i="function"==typeof(n=function(){return I})?n.call(t,r,t,e):n)||(e.exports=i)}()},function(e,t,r){"use strict";(function(t){e.exports=function(){if("object"==typeof globalThis)return globalThis;var e;try{e=this||new Function("return this")()}catch(e){if("object"==typeof window)return window;if("object"==typeof self)return self;if(void 0!==t)return t}return e}()}).call(this,r(4))},function(e,t){var 
r;r=function(){return this}();try{r=r||new Function("return this")()}catch(e){"object"==typeof window&&(r=window)}e.exports=r},function(e,t,r){"use strict";r.r(t),r.d(t,"handler",(function(){return u}));function n(e,t,r,n){return new(r||(r=Promise))((function(i,s){function o(e){try{u(n.next(e))}catch(e){s(e)}}function a(e){try{u(n.throw(e))}catch(e){s(e)}}function u(e){var t;e.done?i(e.value):(t=e.value,t instanceof r?t:new r((function(e){e(t)}))).then(o,a)}u((n=n.apply(e,t||[])).next())}))}Object.create;Object.create;r(1);var i,s=r(0);class o{constructor({config:e,docs:t,index:r,options:n}){this.options=n,this.documents=function(e){const t=new Map,r=new Set;for(const n of e){const[e,i]=n.location.split("#"),o=n.location,a=n.title,u=s(n.text).replace(/\s+(?=[,.:;!?])/g,"").replace(/\s+/g," ");if(i){const i=t.get(e);r.has(i)?t.set(o,{location:o,title:a,text:u,parent:i}):(i.title=n.title,i.text=u,r.add(i))}else t.set(o,{location:o,title:a,text:u})}return t}(t),this.highlight=function(e){const t=new RegExp(e.separator,"img"),r=(e,t,r)=>`${t}${r}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();const i=new RegExp(`(^|${e.separator})(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(t,"|")})`,"img");return e=>e.replace(i,r).replace(/<\/mark>(\s+)]*>/gim,"$1")}}(e),lunr.tokenizer.separator=new RegExp(e.separator),this.index=void 0===r?lunr((function(){1===e.lang.length&&"en"!==e.lang[0]?this.use(lunr[e.lang[0]]):e.lang.length>1&&this.use(lunr.multiLanguage(...e.lang));const r=function(e,t){const[r,n]=[new Set(e),new Set(t)];return[...new Set([...r].filter(e=>!n.has(e)))]}(["trimmer","stopWordFilter","stemmer"],n.pipeline);for(const t of e.lang.map(e=>"en"===e?lunr:lunr[e]))for(const e of r)this.pipeline.remove(t[e]),this.searchPipeline.remove(t[e]);this.field("title",{boost:1e3}),this.field("text"),this.ref("location");for(const e of t)this.add(e)})):lunr.Index.load(r)}search(e){if(e)try{const t=this.highlight(e),r=function(e){const t=new lunr.Query(["title","text"]);return new lunr.QueryParser(e,t).parse(),t.clauses}(e).filter(e=>e.presence!==lunr.Query.presence.PROHIBITED),n=this.index.search(e+"*").reduce((e,{ref:n,score:i,matchData:s})=>{const o=this.documents.get(n);if(void 0!==o){const{location:n,title:a,text:u,parent:l}=o,c=function(e,t){const r=new Set(e),n={};for(let e=0;ee);e.push({location:n,title:t(a),text:t(u),score:i*(1+h),terms:c})}return e},[]).sort((e,t)=>t.score-e.score).reduce((e,t)=>{const r=this.documents.get(t.location);if(void 0!==r){const n="parent"in r?r.parent.location:r.location;e.set(n,[...e.get(n)||[],t])}return e},new Map);let i;if(this.options.suggestions){const e=this.index.query(e=>{for(const t of r)e.term(t.term,{fields:["title"],presence:lunr.Query.presence.REQUIRED,wildcard:lunr.Query.wildcard.TRAILING})});i=e.length?Object.keys(e[0].matchData.metadata):[]}return Object.assign({items:[...n.values()]},void 0!==i&&{suggestions:i})}catch(t){console.warn(`Invalid query: ${e} – see https://bit.ly/2s3ChXG`)}return{items:[]}}}let a;function u(e){return n(this,void 0,void 0,(function*(){switch(e.type){case i.SETUP:return yield function(e){return n(this,void 0,void 0,(function*(){let t="../lunr";if("undefined"!=typeof parent&&"IFrameWorker"in parent){const e=document.querySelector("script[src]"),[r]=e.src.split("/worker");t=t.replace("..",r)}const r=[];for(const n of e.lang)"ja"===n&&r.push(t+"/tinyseg.min.js"),"en"!==n&&r.push(`${t}/min/lunr.${n}.min.js`);e.lang.length>1&&r.push(t+"/min/lunr.multi.min.js"),r.length&&(yield 
importScripts(t+"/min/lunr.stemmer.support.min.js",...r))}))}(e.data.config),a=new o(e.data),{type:i.READY};case i.QUERY:return{type:i.RESULT,data:a?a.search(e.data):{items:[]}};default:throw new TypeError("Invalid message type")}}))}!function(e){e[e.SETUP=0]="SETUP",e[e.READY=1]="READY",e[e.QUERY=2]="QUERY",e[e.RESULT=3]="RESULT"}(i||(i={})),addEventListener("message",e=>n(void 0,void 0,void 0,(function*(){postMessage(yield u(e.data))})))}]); \ No newline at end of file diff --git a/0.2/assets/javascripts/workers/search.d10a1f1d.min.js b/0.2/assets/javascripts/workers/search.d10a1f1d.min.js new file mode 100644 index 00000000..3e928bde --- /dev/null +++ b/0.2/assets/javascripts/workers/search.d10a1f1d.min.js @@ -0,0 +1,59 @@ +(()=>{var he=Object.create,U=Object.defineProperty,de=Object.getPrototypeOf,fe=Object.prototype.hasOwnProperty,pe=Object.getOwnPropertyNames,ge=Object.getOwnPropertyDescriptor;var H=Object.assign,ye=t=>U(t,"__esModule",{value:!0});var Y=(t,e)=>()=>(e||(e={exports:{}},t(e.exports,e)),e.exports);var me=(t,e,r)=>{if(e&&typeof e=="object"||typeof e=="function")for(let n of pe(e))!fe.call(t,n)&&n!=="default"&&U(t,n,{get:()=>e[n],enumerable:!(r=ge(e,n))||r.enumerable});return t},G=t=>me(ye(U(t!=null?he(de(t)):{},"default",t&&t.__esModule&&"default"in t?{get:()=>t.default,enumerable:!0}:{value:t,enumerable:!0})),t);var z=(t,e,r)=>new Promise((n,i)=>{var s=u=>{try{a(r.next(u))}catch(c){i(c)}},o=u=>{try{a(r.throw(u))}catch(c){i(c)}},a=u=>u.done?n(u.value):Promise.resolve(u.value).then(s,o);a((r=r.apply(t,e)).next())});var Z=Y((J,X)=>{(function(){var t=function(e){var r=new t.Builder;return r.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),r.searchPipeline.add(t.stemmer),e.call(r,r),r.build()};t.version="2.3.9";t.utils={},t.utils.warn=function(e){return function(r){e.console&&console.warn&&console.warn(r)}}(this),t.utils.asString=function(e){return e==null?"":e.toString()},t.utils.clone=function(e){if(e==null)return e;for(var r=Object.create(null),n=Object.keys(e),i=0;i0){var h=t.utils.clone(r)||{};h.position=[a,c],h.index=s.length,s.push(new t.Token(n.slice(a,o),h))}a=o+1}}return s},t.tokenizer.separator=/[\s\-]+/;t.Pipeline=function(){this._stack=[]},t.Pipeline.registeredFunctions=Object.create(null),t.Pipeline.registerFunction=function(e,r){r in this.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+r),e.label=r,t.Pipeline.registeredFunctions[e.label]=e},t.Pipeline.warnIfFunctionNotRegistered=function(e){var r=e.label&&e.label in this.registeredFunctions;r||t.utils.warn(`Function is not registered with pipeline. This may cause problems when serialising the index. 
+`,e)},t.Pipeline.load=function(e){var r=new t.Pipeline;return e.forEach(function(n){var i=t.Pipeline.registeredFunctions[n];if(i)r.add(i);else throw new Error("Cannot load unregistered function: "+n)}),r},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(r){t.Pipeline.warnIfFunctionNotRegistered(r),this._stack.push(r)},this)},t.Pipeline.prototype.after=function(e,r){t.Pipeline.warnIfFunctionNotRegistered(r);var n=this._stack.indexOf(e);if(n==-1)throw new Error("Cannot find existingFn");n=n+1,this._stack.splice(n,0,r)},t.Pipeline.prototype.before=function(e,r){t.Pipeline.warnIfFunctionNotRegistered(r);var n=this._stack.indexOf(e);if(n==-1)throw new Error("Cannot find existingFn");this._stack.splice(n,0,r)},t.Pipeline.prototype.remove=function(e){var r=this._stack.indexOf(e);r!=-1&&this._stack.splice(r,1)},t.Pipeline.prototype.run=function(e){for(var r=this._stack.length,n=0;n1&&(oe&&(n=s),o!=e);)i=n-r,s=r+Math.floor(i/2),o=this.elements[s*2];if(o==e||o>e)return s*2;if(ou?h+=2:a==u&&(r+=n[c+1]*i[h+1],c+=2,h+=2);return r},t.Vector.prototype.similarity=function(e){return this.dot(e)/this.magnitude()||0},t.Vector.prototype.toArray=function(){for(var e=new Array(this.elements.length/2),r=1,n=0;r0){var o=s.str.charAt(0),a;o in s.node.edges?a=s.node.edges[o]:(a=new t.TokenSet,s.node.edges[o]=a),s.str.length==1&&(a.final=!0),i.push({node:a,editsRemaining:s.editsRemaining,str:s.str.slice(1)})}if(s.editsRemaining!=0){if("*"in s.node.edges)var u=s.node.edges["*"];else{var u=new t.TokenSet;s.node.edges["*"]=u}if(s.str.length==0&&(u.final=!0),i.push({node:u,editsRemaining:s.editsRemaining-1,str:s.str}),s.str.length>1&&i.push({node:s.node,editsRemaining:s.editsRemaining-1,str:s.str.slice(1)}),s.str.length==1&&(s.node.final=!0),s.str.length>=1){if("*"in s.node.edges)var c=s.node.edges["*"];else{var c=new t.TokenSet;s.node.edges["*"]=c}s.str.length==1&&(c.final=!0),i.push({node:c,editsRemaining:s.editsRemaining-1,str:s.str.slice(1)})}if(s.str.length>1){var h=s.str.charAt(0),y=s.str.charAt(1),g;y in s.node.edges?g=s.node.edges[y]:(g=new t.TokenSet,s.node.edges[y]=g),s.str.length==1&&(g.final=!0),i.push({node:g,editsRemaining:s.editsRemaining-1,str:h+s.str.slice(2)})}}}return n},t.TokenSet.fromString=function(e){for(var r=new t.TokenSet,n=r,i=0,s=e.length;i=e;r--){var n=this.uncheckedNodes[r],i=n.child.toString();i in this.minimizedNodes?n.parent.edges[n.char]=this.minimizedNodes[i]:(n.child._str=i,this.minimizedNodes[i]=n.child),this.uncheckedNodes.pop()}};t.Index=function(e){this.invertedIndex=e.invertedIndex,this.fieldVectors=e.fieldVectors,this.tokenSet=e.tokenSet,this.fields=e.fields,this.pipeline=e.pipeline},t.Index.prototype.search=function(e){return this.query(function(r){var n=new t.QueryParser(e,r);n.parse()})},t.Index.prototype.query=function(e){for(var r=new t.Query(this.fields),n=Object.create(null),i=Object.create(null),s=Object.create(null),o=Object.create(null),a=Object.create(null),u=0;u1?this._b=1:this._b=e},t.Builder.prototype.k1=function(e){this._k1=e},t.Builder.prototype.add=function(e,r){var n=e[this._ref],i=Object.keys(this._fields);this._documents[n]=r||{},this.documentCount+=1;for(var s=0;s=this.length)return t.QueryLexer.EOS;var e=this.str.charAt(this.pos);return this.pos+=1,e},t.QueryLexer.prototype.width=function(){return 
this.pos-this.start},t.QueryLexer.prototype.ignore=function(){this.start==this.pos&&(this.pos+=1),this.start=this.pos},t.QueryLexer.prototype.backup=function(){this.pos-=1},t.QueryLexer.prototype.acceptDigitRun=function(){var e,r;do e=this.next(),r=e.charCodeAt(0);while(r>47&&r<58);e!=t.QueryLexer.EOS&&this.backup()},t.QueryLexer.prototype.more=function(){return this.pos1&&(e.backup(),e.emit(t.QueryLexer.TERM)),e.ignore(),e.more())return t.QueryLexer.lexText},t.QueryLexer.lexEditDistance=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(t.QueryLexer.EDIT_DISTANCE),t.QueryLexer.lexText},t.QueryLexer.lexBoost=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(t.QueryLexer.BOOST),t.QueryLexer.lexText},t.QueryLexer.lexEOS=function(e){e.width()>0&&e.emit(t.QueryLexer.TERM)},t.QueryLexer.termSeparator=t.tokenizer.separator,t.QueryLexer.lexText=function(e){for(;;){var r=e.next();if(r==t.QueryLexer.EOS)return t.QueryLexer.lexEOS;if(r.charCodeAt(0)==92){e.escapeCharacter();continue}if(r==":")return t.QueryLexer.lexField;if(r=="~")return e.backup(),e.width()>0&&e.emit(t.QueryLexer.TERM),t.QueryLexer.lexEditDistance;if(r=="^")return e.backup(),e.width()>0&&e.emit(t.QueryLexer.TERM),t.QueryLexer.lexBoost;if(r=="+"&&e.width()===1||r=="-"&&e.width()===1)return e.emit(t.QueryLexer.PRESENCE),t.QueryLexer.lexText;if(r.match(t.QueryLexer.termSeparator))return t.QueryLexer.lexTerm}},t.QueryParser=function(e,r){this.lexer=new t.QueryLexer(e),this.query=r,this.currentClause={},this.lexemeIdx=0},t.QueryParser.prototype.parse=function(){this.lexer.run(),this.lexemes=this.lexer.lexemes;for(var e=t.QueryParser.parseClause;e;)e=e(this);return this.query},t.QueryParser.prototype.peekLexeme=function(){return this.lexemes[this.lexemeIdx]},t.QueryParser.prototype.consumeLexeme=function(){var e=this.peekLexeme();return this.lexemeIdx+=1,e},t.QueryParser.prototype.nextClause=function(){var e=this.currentClause;this.query.clause(e),this.currentClause={}},t.QueryParser.parseClause=function(e){var r=e.peekLexeme();if(r!=null)switch(r.type){case t.QueryLexer.PRESENCE:return t.QueryParser.parsePresence;case t.QueryLexer.FIELD:return t.QueryParser.parseField;case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var n="expected either a field or a term, found "+r.type;throw r.str.length>=1&&(n+=" with value '"+r.str+"'"),new t.QueryParseError(n,r.start,r.end)}},t.QueryParser.parsePresence=function(e){var r=e.consumeLexeme();if(r!=null){switch(r.str){case"-":e.currentClause.presence=t.Query.presence.PROHIBITED;break;case"+":e.currentClause.presence=t.Query.presence.REQUIRED;break;default:var n="unrecognised presence operator'"+r.str+"'";throw new t.QueryParseError(n,r.start,r.end)}var i=e.peekLexeme();if(i==null){var n="expecting term or field, found nothing";throw new t.QueryParseError(n,r.start,r.end)}switch(i.type){case t.QueryLexer.FIELD:return t.QueryParser.parseField;case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var n="expecting term or field, found '"+i.type+"'";throw new t.QueryParseError(n,i.start,i.end)}}},t.QueryParser.parseField=function(e){var r=e.consumeLexeme();if(r!=null){if(e.query.allFields.indexOf(r.str)==-1){var n=e.query.allFields.map(function(o){return"'"+o+"'"}).join(", "),i="unrecognised field '"+r.str+"', possible fields: "+n;throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.fields=[r.str];var s=e.peekLexeme();if(s==null){var i="expecting term, found nothing";throw new t.QueryParseError(i,r.start,r.end)}switch(s.type){case t.QueryLexer.TERM:return 
t.QueryParser.parseTerm;default:var i="expecting term, found '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},t.QueryParser.parseTerm=function(e){var r=e.consumeLexeme();if(r!=null){e.currentClause.term=r.str.toLowerCase(),r.str.indexOf("*")!=-1&&(e.currentClause.usePipeline=!1);var n=e.peekLexeme();if(n==null){e.nextClause();return}switch(n.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+n.type+"'";throw new t.QueryParseError(i,n.start,n.end)}}},t.QueryParser.parseEditDistance=function(e){var r=e.consumeLexeme();if(r!=null){var n=parseInt(r.str,10);if(isNaN(n)){var i="edit distance must be numeric";throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.editDistance=n;var s=e.peekLexeme();if(s==null){e.nextClause();return}switch(s.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},t.QueryParser.parseBoost=function(e){var r=e.consumeLexeme();if(r!=null){var n=parseInt(r.str,10);if(isNaN(n)){var i="boost must be numeric";throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.boost=n;var s=e.peekLexeme();if(s==null){e.nextClause();return}switch(s.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},function(e,r){typeof define=="function"&&define.amd?define(r):typeof J=="object"?X.exports=r():e.lunr=r()}(this,function(){return t})})()});var ee=Y((Le,K)=>{"use strict";var ve=/["'&<>]/;K.exports=xe;function xe(t){var e=""+t,r=ve.exec(e);if(!r)return e;var n,i="",s=0,o=0;for(s=r.index;s`${i}${s}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${t.separator})(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(e,"|")})`,"img");return s=>s.replace(i,r).replace(/<\/mark>(\s+)]*>/img,"$1")}}function ie(t){let e=new lunr.Query(["title","text"]);return new lunr.QueryParser(t,e).parse(),e.clauses}function se(t,e){let r=new Set(t),n={};for(let i=0;i!n.has(i)))]}var W=class{constructor({config:e,docs:r,index:n,options:i}){this.options=i,this.documents=re(r),this.highlight=ne(e),lunr.tokenizer.separator=new RegExp(e.separator),typeof n=="undefined"?this.index=lunr(function(){e.lang.length===1&&e.lang[0]!=="en"?this.use(lunr[e.lang[0]]):e.lang.length>1&&this.use(lunr.multiLanguage(...e.lang));let s=Se(["trimmer","stopWordFilter","stemmer"],i.pipeline);for(let o of e.lang.map(a=>a==="en"?lunr:lunr[a]))for(let a of 
s)this.pipeline.remove(o[a]),this.searchPipeline.remove(o[a]);this.field("title",{boost:1e3}),this.field("text"),this.ref("location");for(let o of r)this.add(o)}):this.index=lunr.Index.load(n)}search(e){if(e)try{let r=this.highlight(e),n=ie(e).filter(o=>o.presence!==lunr.Query.presence.PROHIBITED),i=this.index.search(`${e}*`).reduce((o,{ref:a,score:u,matchData:c})=>{let h=this.documents.get(a);if(typeof h!="undefined"){let{location:y,title:g,text:b,parent:v}=h,Q=se(n,Object.keys(c.metadata)),f=+!v+ +Object.values(Q).every(p=>p);o.push({location:y,title:r(g),text:r(b),score:u*(1+f),terms:Q})}return o},[]).sort((o,a)=>a.score-o.score).reduce((o,a)=>{let u=this.documents.get(a.location);if(typeof u!="undefined"){let c="parent"in u?u.parent.location:u.location;o.set(c,[...o.get(c)||[],a])}return o},new Map),s;if(this.options.suggestions){let o=this.index.query(a=>{for(let u of n)a.term(u.term,{fields:["title"],presence:lunr.Query.presence.REQUIRED,wildcard:lunr.Query.wildcard.TRAILING})});s=o.length?Object.keys(o[0].matchData.metadata):[]}return H({items:[...i.values()]},typeof s!="undefined"&&{suggestions:s})}catch(r){console.warn(`Invalid query: ${e} \u2013 see https://bit.ly/2s3ChXG`)}return{items:[]}}};var T;(function(t){t[t.SETUP=0]="SETUP",t[t.READY=1]="READY",t[t.QUERY=2]="QUERY",t[t.RESULT=3]="RESULT"})(T||(T={}));var q;function Qe(t){return z(this,null,function*(){let e="../lunr";if(typeof parent!="undefined"&&"IFrameWorker"in parent){let n=document.querySelector("script[src]"),[i]=n.src.split("/worker");e=e.replace("..",i)}let r=[];for(let n of t.lang)n==="ja"&&r.push(`${e}/tinyseg.js`),n!=="en"&&r.push(`${e}/min/lunr.${n}.min.js`);t.lang.length>1&&r.push(`${e}/min/lunr.multi.min.js`),r.length&&(yield importScripts(`${e}/min/lunr.stemmer.support.min.js`,...r))})}function be(t){return z(this,null,function*(){switch(t.type){case T.SETUP:return yield Qe(t.data.config),q=new W(t.data),{type:T.READY};case T.QUERY:return{type:T.RESULT,data:q?q.search(t.data):{items:[]}};default:throw new TypeError("Invalid message type")}})}self.lunr=oe.default;addEventListener("message",t=>z(void 0,null,function*(){postMessage(yield be(t.data))}));})(); +/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */ +/*! + * lunr.Builder + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.Index + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.Pipeline + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.Set + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.TokenSet + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.Vector + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.stemmer + * Copyright (C) 2020 Oliver Nightingale + * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt + */ +/*! + * lunr.stopWordFilter + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.tokenizer + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.trimmer + * Copyright (C) 2020 Oliver Nightingale + */ +/*! 
+ * lunr.utils + * Copyright (C) 2020 Oliver Nightingale + */ +/** + * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9 + * Copyright (C) 2020 Oliver Nightingale + * @license MIT + */ diff --git a/0.2/assets/stylesheets/main.a2a6bca7.min.css b/0.2/assets/stylesheets/main.a2a6bca7.min.css deleted file mode 100644 index ea83facd..00000000 --- a/0.2/assets/stylesheets/main.a2a6bca7.min.css +++ /dev/null @@ -1 +0,0 @@ -html{box-sizing:border-box;-webkit-text-size-adjust:none;-moz-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none}*,*::before,*::after{box-sizing:inherit}body{margin:0}hr{box-sizing:content-box;overflow:visible}a,button,label,input{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}small{font-size:80%}sub,sup{line-height:1em}img{border-style:none}table{border-collapse:separate;border-spacing:0}td,th{font-weight:normal;vertical-align:top}button{margin:0;padding:0;font-size:inherit;background:transparent;border:0}input{border:0;outline:none}:root{--md-default-fg-color: hsla(0, 0%, 0%, 0.87);--md-default-fg-color--light: hsla(0, 0%, 0%, 0.54);--md-default-fg-color--lighter: hsla(0, 0%, 0%, 0.32);--md-default-fg-color--lightest: hsla(0, 0%, 0%, 0.07);--md-default-bg-color: hsla(0, 0%, 100%, 1);--md-default-bg-color--light: hsla(0, 0%, 100%, 0.7);--md-default-bg-color--lighter: hsla(0, 0%, 100%, 0.3);--md-default-bg-color--lightest: hsla(0, 0%, 100%, 0.12);--md-primary-fg-color: hsla(231, 48%, 48%, 1);--md-primary-fg-color--light: hsla(231, 44%, 56%, 1);--md-primary-fg-color--dark: hsla(232, 54%, 41%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7);--md-accent-fg-color: hsla(231, 99%, 66%, 1);--md-accent-fg-color--transparent: hsla(231, 99%, 66%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}:root>*{--md-code-fg-color: hsla(200, 18%, 26%, 1);--md-code-bg-color: hsla(0, 0%, 96%, 1);--md-code-hl-color: hsla(60, 100%, 50%, 0.5);--md-code-hl-number-color: hsla(0, 67%, 50%, 1);--md-code-hl-special-color: hsla(340, 83%, 47%, 1);--md-code-hl-function-color: hsla(291, 45%, 50%, 1);--md-code-hl-constant-color: hsla(250, 63%, 60%, 1);--md-code-hl-keyword-color: hsla(219, 54%, 51%, 1);--md-code-hl-string-color: hsla(150, 63%, 30%, 1);--md-code-hl-name-color: var(--md-code-fg-color);--md-code-hl-operator-color: var(--md-default-fg-color--light);--md-code-hl-punctuation-color: var(--md-default-fg-color--light);--md-code-hl-comment-color: var(--md-default-fg-color--light);--md-code-hl-generic-color: var(--md-default-fg-color--light);--md-code-hl-variable-color: var(--md-default-fg-color--light);--md-typeset-color: var(--md-default-fg-color);--md-typeset-a-color: var(--md-primary-fg-color);--md-typeset-mark-color: hsla(60, 100%, 50%, 0.5);--md-typeset-del-color: hsla(6, 90%, 60%, 0.15);--md-typeset-ins-color: hsla(150, 90%, 44%, 0.15);--md-typeset-kbd-color: hsla(0, 0%, 98%, 1);--md-typeset-kbd-accent-color: hsla(0, 100%, 100%, 1);--md-typeset-kbd-border-color: hsla(0, 0%, 72%, 1);--md-admonition-fg-color: var(--md-default-fg-color);--md-admonition-bg-color: var(--md-default-bg-color);--md-footer-fg-color: hsla(0, 0%, 100%, 1);--md-footer-fg-color--light: hsla(0, 0%, 100%, 0.7);--md-footer-fg-color--lighter: hsla(0, 0%, 100%, 0.3);--md-footer-bg-color: hsla(0, 0%, 0%, 0.87);--md-footer-bg-color--dark: hsla(0, 0%, 0%, 0.32)}.md-icon 
svg{display:block;width:1.2rem;height:1.2rem;fill:currentColor}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body,input{color:var(--md-typeset-color);font-feature-settings:"kern","liga";font-family:-apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif}code,pre,kbd{color:var(--md-typeset-color);font-feature-settings:"kern";font-family:SFMono-Regular,Consolas,Menlo,monospace}:root{--md-typeset-table--ascending: url("data:image/svg+xml;charset=utf-8,");--md-typeset-table--descending: url("data:image/svg+xml;charset=utf-8,")}.md-typeset{font-size:.8rem;line-height:1.6;-webkit-print-color-adjust:exact;color-adjust:exact}@media print{.md-typeset{font-size:.68rem}}.md-typeset p,.md-typeset ul,.md-typeset ol,.md-typeset blockquote{margin:1em 0}.md-typeset h1{margin:0 0 1.25em;color:var(--md-default-fg-color--light);font-weight:300;font-size:2em;line-height:1.3;letter-spacing:-0.01em}.md-typeset h2{margin:1.6em 0 .64em;font-weight:300;font-size:1.5625em;line-height:1.4;letter-spacing:-0.01em}.md-typeset h3{margin:1.6em 0 .8em;font-weight:400;font-size:1.25em;line-height:1.5;letter-spacing:-0.01em}.md-typeset h2+h3{margin-top:.8em}.md-typeset h4{margin:1em 0;font-weight:700;letter-spacing:-0.01em}.md-typeset h5,.md-typeset h6{margin:1.25em 0;color:var(--md-default-fg-color--light);font-weight:700;font-size:.8em;letter-spacing:-0.01em}.md-typeset h5{text-transform:uppercase}.md-typeset hr{margin:1.5em 0;border-bottom:.05rem dotted var(--md-default-fg-color--lighter)}.md-typeset a{color:var(--md-typeset-a-color);word-break:break-word}.md-typeset a,.md-typeset a::before{transition:color 125ms}.md-typeset a:focus,.md-typeset a:hover{color:var(--md-accent-fg-color)}.md-typeset code,.md-typeset pre,.md-typeset kbd{color:var(--md-code-fg-color);direction:ltr}@media print{.md-typeset code,.md-typeset pre,.md-typeset kbd{white-space:pre-wrap}}.md-typeset code{padding:0 .2941176471em;font-size:.85em;word-break:break-word;background-color:var(--md-code-bg-color);border-radius:.1rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset code:not(.focus-visible){outline:none;-webkit-tap-highlight-color:transparent}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:initial;padding:initial;background-color:transparent;box-shadow:none}.md-typeset a>code{color:currentColor}.md-typeset pre{position:relative;margin:1em 0;line-height:1.4}.md-typeset pre>code{display:block;margin:0;padding:.7720588235em 1.1764705882em;overflow:auto;word-break:normal;box-shadow:none;-webkit-box-decoration-break:slice;box-decoration-break:slice;touch-action:auto;scrollbar-width:thin;scrollbar-color:var(--md-default-fg-color--lighter) transparent}.md-typeset pre>code:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-typeset pre>code::-webkit-scrollbar{width:.2rem;height:.2rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}@media screen and (max-width: 44.9375em){.md-typeset>pre{margin:1em -0.8rem}.md-typeset>pre code{border-radius:0}}.md-typeset kbd{display:inline-block;padding:0 .6666666667em;color:var(--md-default-fg-color);font-size:.75em;vertical-align:text-top;word-break:break-word;background-color:var(--md-typeset-kbd-color);border-radius:.1rem;box-shadow:0 .1rem 0 .05rem var(--md-typeset-kbd-border-color),0 .1rem 0 
var(--md-typeset-kbd-border-color),0 -0.1rem .2rem var(--md-typeset-kbd-accent-color) inset}.md-typeset mark{color:inherit;word-break:break-word;background-color:var(--md-typeset-mark-color);-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{text-decoration:none;border-bottom:.05rem dotted var(--md-default-fg-color--light);cursor:help}@media(hover: none){.md-typeset abbr{position:relative}.md-typeset abbr[title]:focus::after,.md-typeset abbr[title]:hover::after{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:absolute;left:0;display:inline-block;width:auto;min-width:-webkit-max-content;min-width:-moz-max-content;min-width:max-content;max-width:80%;margin-top:2em;padding:.2rem .3rem;color:var(--md-default-bg-color);font-size:.7rem;background-color:var(--md-default-fg-color);border-radius:.1rem;content:attr(title)}}.md-typeset small{opacity:.75}.md-typeset sup,.md-typeset sub{margin-left:.078125em}[dir=rtl] .md-typeset sup,[dir=rtl] .md-typeset sub{margin-right:.078125em;margin-left:initial}.md-typeset blockquote{padding-left:.6rem;color:var(--md-default-fg-color--light);border-left:.2rem solid var(--md-default-fg-color--lighter)}[dir=rtl] .md-typeset blockquote{padding-right:.6rem;padding-left:initial;border-right:.2rem solid var(--md-default-fg-color--lighter);border-left:initial}.md-typeset ul{list-style-type:disc}.md-typeset ul,.md-typeset ol{margin-left:.625em;padding:0}[dir=rtl] .md-typeset ul,[dir=rtl] .md-typeset ol{margin-right:.625em;margin-left:initial}.md-typeset ul ol,.md-typeset ol ol{list-style-type:lower-alpha}.md-typeset ul ol ol,.md-typeset ol ol ol{list-style-type:lower-roman}.md-typeset ul li,.md-typeset ol li{margin-bottom:.5em;margin-left:1.25em}[dir=rtl] .md-typeset ul li,[dir=rtl] .md-typeset ol li{margin-right:1.25em;margin-left:initial}.md-typeset ul li p,.md-typeset ul li blockquote,.md-typeset ol li p,.md-typeset ol li blockquote{margin:.5em 0}.md-typeset ul li:last-child,.md-typeset ol li:last-child{margin-bottom:0}.md-typeset ul li ul,.md-typeset ul li ol,.md-typeset ol li ul,.md-typeset ol li ol{margin:.5em 0 .5em .625em}[dir=rtl] .md-typeset ul li ul,[dir=rtl] .md-typeset ul li ol,[dir=rtl] .md-typeset ol li ul,[dir=rtl] .md-typeset ol li ol{margin-right:.625em;margin-left:initial}.md-typeset dd{margin:1em 0 1.5em 1.875em}[dir=rtl] .md-typeset dd{margin-right:1.875em;margin-left:initial}.md-typeset img,.md-typeset svg{max-width:100%;height:auto}.md-typeset img[align=left],.md-typeset svg[align=left]{margin:1em;margin-left:0}.md-typeset img[align=right],.md-typeset svg[align=right]{margin:1em;margin-right:0}.md-typeset img[align]:only-child,.md-typeset svg[align]:only-child{margin-top:0}.md-typeset figure{width:-webkit-fit-content;width:-moz-fit-content;width:fit-content;max-width:100%;margin:0 auto;text-align:center}.md-typeset figcaption{max-width:24rem;margin:.5em auto 2em;font-style:italic}.md-typeset iframe{max-width:100%}.md-typeset table:not([class]){display:inline-block;max-width:100%;overflow:auto;font-size:.64rem;background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .05rem rgba(0,0,0,.1);touch-action:auto}@media print{.md-typeset table:not([class]){display:table}}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) th>*:first-child,.md-typeset table:not([class]) td>*:first-child{margin-top:0}.md-typeset table:not([class]) th>*:last-child,.md-typeset table:not([class]) 
td>*:last-child{margin-bottom:0}.md-typeset table:not([class]) th:not([align]),.md-typeset table:not([class]) td:not([align]){text-align:left}[dir=rtl] .md-typeset table:not([class]) th:not([align]),[dir=rtl] .md-typeset table:not([class]) td:not([align]){text-align:right}.md-typeset table:not([class]) th{min-width:5rem;padding:.9375em 1.25em;color:var(--md-default-bg-color);vertical-align:top;background-color:var(--md-default-fg-color--light)}.md-typeset table:not([class]) th a{color:inherit}.md-typeset table:not([class]) td{padding:.9375em 1.25em;vertical-align:top;border-top:.05rem solid var(--md-default-fg-color--lightest)}.md-typeset table:not([class]) tr{transition:background-color 125ms}.md-typeset table:not([class]) tr:hover{background-color:rgba(0,0,0,.035);box-shadow:0 .05rem 0 var(--md-default-bg-color) inset}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset table th[role=columnheader]{cursor:pointer}.md-typeset table th[role=columnheader]::after{display:inline-block;width:1.2em;height:1.2em;margin-left:.5em;vertical-align:sub;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}.md-typeset table th[role=columnheader][aria-sort=ascending]::after{background-color:currentColor;-webkit-mask-image:var(--md-typeset-table--ascending);mask-image:var(--md-typeset-table--ascending)}.md-typeset table th[role=columnheader][aria-sort=descending]::after{background-color:currentColor;-webkit-mask-image:var(--md-typeset-table--descending);mask-image:var(--md-typeset-table--descending)}.md-typeset__scrollwrap{margin:1em -0.8rem;overflow-x:auto;touch-action:auto}.md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 .8rem}@media print{.md-typeset__table{display:block}}html .md-typeset__table table{display:table;width:100%;margin:0;overflow:hidden}html{height:100%;overflow-x:hidden;font-size:125%}@media screen and (min-width: 100em){html{font-size:137.5%}}@media screen and (min-width: 125em){html{font-size:150%}}body{position:relative;display:flex;flex-direction:column;width:100%;min-height:100%;font-size:.5rem;background-color:var(--md-default-bg-color)}@media screen and (max-width: 59.9375em){body[data-md-state=lock]{position:fixed}}@media print{body{display:block}}hr{display:block;height:.05rem;padding:0;border:0}.md-grid{max-width:61rem;margin-right:auto;margin-left:auto}.md-container{display:flex;flex-direction:column;flex-grow:1}@media print{.md-container{display:block}}.md-main{flex-grow:1}.md-main__inner{display:flex;height:100%;margin-top:1.5rem}.md-ellipsis{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.md-toggle{display:none}.md-overlay{position:fixed;top:0;z-index:3;width:0;height:0;background-color:rgba(0,0,0,.54);opacity:0;transition:width 0ms 250ms,height 0ms 250ms,opacity 250ms}@media screen and (max-width: 76.1875em){[data-md-toggle=drawer]:checked~.md-overlay{width:100%;height:100%;opacity:1;transition:width 0ms,height 0ms,opacity 250ms}}.md-skip{position:fixed;z-index:-1;margin:.5rem;padding:.3rem .5rem;color:var(--md-default-bg-color);font-size:.64rem;background-color:var(--md-default-fg-color);border-radius:.1rem;transform:translateY(0.4rem);opacity:0}.md-skip:focus{z-index:10;transform:translateY(0);opacity:1;transition:transform 250ms cubic-bezier(0.4, 0, 0.2, 1),opacity 175ms 75ms}@page{margin:25mm}.md-announce{overflow:auto;background-color:var(--md-footer-bg-color)}.md-announce__inner{margin:.6rem auto;padding:0 
.8rem;color:var(--md-footer-fg-color);font-size:.7rem}@media print{.md-announce{display:none}}.md-typeset .md-button{display:inline-block;padding:.625em 2em;color:var(--md-primary-fg-color);font-weight:700;border:.1rem solid currentColor;border-radius:.1rem;transition:color 125ms,background-color 125ms,border-color 125ms}.md-typeset .md-button--primary{color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color);border-color:var(--md-primary-fg-color)}.md-typeset .md-button:focus,.md-typeset .md-button:hover{color:var(--md-accent-bg-color);background-color:var(--md-accent-fg-color);border-color:var(--md-accent-fg-color)}:root{--md-clipboard-icon: url("data:image/svg+xml;charset=utf-8,")}.md-clipboard{position:absolute;top:.5em;right:.5em;z-index:1;width:1.5em;height:1.5em;color:var(--md-default-fg-color--lightest);border-radius:.1rem;cursor:pointer;transition:color 250ms}@media print{.md-clipboard{display:none}}.md-clipboard::after{display:block;width:1.125em;height:1.125em;margin:0 auto;background-color:currentColor;-webkit-mask-image:var(--md-clipboard-icon);mask-image:var(--md-clipboard-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}pre:hover .md-clipboard{color:var(--md-default-fg-color--light)}pre .md-clipboard:focus,pre .md-clipboard:hover{color:var(--md-accent-fg-color)}.md-content{flex-grow:1;overflow:auto}.md-content__inner{margin:0 .8rem 1.2rem;padding-top:.6rem}@media screen and (min-width: 76.25em){.md-sidebar--primary:not([hidden])~.md-content .md-content__inner{margin-left:1.2rem}[dir=rtl] .md-sidebar--primary:not([hidden])~.md-content .md-content__inner{margin-right:1.2rem;margin-left:.8rem}.md-sidebar--secondary:not([hidden])~.md-content .md-content__inner{margin-right:1.2rem}[dir=rtl] .md-sidebar--secondary:not([hidden])~.md-content .md-content__inner{margin-right:.8rem;margin-left:1.2rem}}.md-content__inner::before{display:block;height:.4rem;content:""}.md-content__inner>:last-child{margin-bottom:0}.md-content__button{float:right;margin:.4rem 0;margin-left:.4rem;padding:0}[dir=rtl] .md-content__button{float:left;margin-right:.4rem;margin-left:initial}[dir=rtl] .md-content__button svg{transform:scaleX(-1)}.md-typeset .md-content__button{color:var(--md-default-fg-color--lighter)}.md-content__button svg{display:inline;vertical-align:top}@media print{.md-content__button{display:none}}.md-dialog{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:fixed;right:.8rem;bottom:.8rem;left:initial;z-index:2;display:block;min-width:11.1rem;padding:.4rem .6rem;color:var(--md-default-bg-color);font-size:.7rem;background-color:var(--md-default-fg-color);border:none;border-radius:.1rem;transform:translateY(100%);opacity:0;transition:transform 0ms 400ms,opacity 400ms}[dir=rtl] .md-dialog{right:initial;left:.8rem}.md-dialog[data-md-state=open]{transform:translateY(0);opacity:1;transition:transform 400ms cubic-bezier(0.075, 0.85, 0.175, 1),opacity 400ms}@media print{.md-dialog{display:none}}.md-header{position:-webkit-sticky;position:sticky;top:0;right:0;left:0;z-index:2;height:2.4rem;color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color);box-shadow:0 0 .2rem rgba(0,0,0,0),0 .2rem .4rem rgba(0,0,0,0);transition:color 250ms,background-color 250ms}.no-js .md-header{box-shadow:none;transition:none}.md-header[data-md-state=shadow]{box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2);transition:transform 250ms cubic-bezier(0.1, 
0.7, 0.1, 1),color 250ms,background-color 250ms,box-shadow 250ms}.md-header[data-md-state=hidden]{transform:translateY(-100%);transition:transform 250ms cubic-bezier(0.8, 0, 0.6, 1),color 250ms,background-color 250ms,box-shadow 250ms}@media print{.md-header{display:none}}.md-header-nav{display:flex;padding:0 .2rem}.md-header-nav__button{position:relative;z-index:1;display:block;margin:.2rem;padding:.4rem;color:currentColor;cursor:pointer;transition:opacity 250ms}.md-header-nav__button:not(.focus-visible){outline:none}.md-header-nav__button:focus,.md-header-nav__button:hover{opacity:.7}.md-header-nav__button.md-logo{margin:.2rem;padding:.4rem}.md-header-nav__button.md-logo img,.md-header-nav__button.md-logo svg{display:block;width:1.2rem;height:1.2rem;fill:currentColor}[dir=rtl] .md-header-nav__button[for=__search] svg{transform:scaleX(-1)}.no-js .md-header-nav__button[for=__search]{display:none}@media screen and (min-width: 60em){.md-header-nav__button[for=__search]{display:none}}@media screen and (max-width: 76.1875em){.md-header-nav__button.md-logo{display:none}}@media screen and (min-width: 76.25em){.md-header-nav__button[for=__drawer]{display:none}}.md-header-nav__topic{position:absolute;display:flex;max-width:100%;transition:transform 400ms cubic-bezier(0.1, 0.7, 0.1, 1),opacity 150ms}.md-header-nav__topic+.md-header-nav__topic{z-index:-1;transform:translateX(1.25rem);opacity:0;transition:transform 400ms cubic-bezier(1, 0.7, 0.1, 0.1),opacity 150ms;pointer-events:none}[dir=rtl] .md-header-nav__topic+.md-header-nav__topic{transform:translateX(-1.25rem)}.no-js .md-header-nav__topic{position:initial}.no-js .md-header-nav__topic+.md-header-nav__topic{display:none}.md-header-nav__title{flex-grow:1;margin-right:.4rem;margin-left:1rem;font-size:.9rem;line-height:2.4rem}.md-header-nav__title[data-md-state=active] .md-header-nav__topic{z-index:-1;transform:translateX(-1.25rem);opacity:0;transition:transform 400ms cubic-bezier(1, 0.7, 0.1, 0.1),opacity 150ms;pointer-events:none}[dir=rtl] .md-header-nav__title[data-md-state=active] .md-header-nav__topic{transform:translateX(1.25rem)}.md-header-nav__title[data-md-state=active] .md-header-nav__topic+.md-header-nav__topic{z-index:0;transform:translateX(0);opacity:1;transition:transform 400ms cubic-bezier(0.1, 0.7, 0.1, 1),opacity 150ms;pointer-events:initial}.md-header-nav__title>.md-header-nav__ellipsis{position:relative;width:100%;height:100%}.md-header-nav__options{max-width:100%;transition:max-width 0ms 250ms,opacity 250ms 250ms}[data-md-toggle=search]:checked~.md-header .md-header-nav__options{max-width:0;opacity:0;transition:max-width 0ms,opacity 0ms}.md-header-nav__options>[data-md-state=hidden]{display:none}.md-header-nav__source{display:none}@media screen and (min-width: 60em){.md-header-nav__source{display:block;width:11.7rem;max-width:11.7rem;margin-left:1rem}[dir=rtl] .md-header-nav__source{margin-right:1rem;margin-left:initial}}@media screen and (min-width: 76.25em){.md-header-nav__source{margin-left:1.4rem}[dir=rtl] .md-header-nav__source{margin-right:1.4rem}}.md-footer{color:var(--md-footer-fg-color);background-color:var(--md-footer-bg-color)}@media print{.md-footer{display:none}}.md-footer-nav__inner{padding:.2rem;overflow:auto}.md-footer-nav__link{display:flex;padding-top:1.4rem;padding-bottom:.4rem;transition:opacity 250ms}@media screen and (min-width: 45em){.md-footer-nav__link{width:50%}}.md-footer-nav__link:focus,.md-footer-nav__link:hover{opacity:.7}.md-footer-nav__link--prev{float:left}[dir=rtl] 
.md-footer-nav__link--prev{float:right}[dir=rtl] .md-footer-nav__link--prev svg{transform:scaleX(-1)}@media screen and (max-width: 44.9375em){.md-footer-nav__link--prev{width:25%}.md-footer-nav__link--prev .md-footer-nav__title{display:none}}.md-footer-nav__link--next{float:right;text-align:right}[dir=rtl] .md-footer-nav__link--next{float:left;text-align:left}[dir=rtl] .md-footer-nav__link--next svg{transform:scaleX(-1)}@media screen and (max-width: 44.9375em){.md-footer-nav__link--next{width:75%}}.md-footer-nav__title{position:relative;flex-grow:1;max-width:calc(100% - 2.4rem);padding:0 1rem;font-size:.9rem;line-height:2.4rem}.md-footer-nav__button{margin:.2rem;padding:.4rem}.md-footer-nav__direction{position:absolute;right:0;left:0;margin-top:-1rem;padding:0 1rem;font-size:.64rem;opacity:.7}.md-footer-meta{background-color:var(--md-footer-bg-color--dark)}.md-footer-meta__inner{display:flex;flex-wrap:wrap;justify-content:space-between;padding:.2rem}html .md-footer-meta.md-typeset a{color:var(--md-footer-fg-color--light)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:var(--md-footer-fg-color)}.md-footer-copyright{width:100%;margin:auto .6rem;padding:.4rem 0;color:var(--md-footer-fg-color--lighter);font-size:.64rem}@media screen and (min-width: 45em){.md-footer-copyright{width:auto}}.md-footer-copyright__highlight{color:var(--md-footer-fg-color--light)}.md-footer-social{margin:0 .4rem;padding:.2rem 0 .6rem}@media screen and (min-width: 45em){.md-footer-social{padding:.6rem 0}}.md-footer-social__link{display:inline-block;width:1.6rem;height:1.6rem;text-align:center}.md-footer-social__link::before{line-height:1.9}.md-footer-social__link svg{max-height:.8rem;vertical-align:-25%;fill:currentColor}:root{--md-nav-icon--prev: url("data:image/svg+xml;charset=utf-8,");--md-nav-icon--next: url("data:image/svg+xml;charset=utf-8,");--md-toc-icon: url("data:image/svg+xml;charset=utf-8,")}.md-nav{font-size:.7rem;line-height:1.3}.md-nav__title{display:block;padding:0 .6rem;overflow:hidden;font-weight:700;text-overflow:ellipsis}.md-nav__title .md-nav__button{display:none}.md-nav__title .md-nav__button img{width:100%;height:auto}.md-nav__title .md-nav__button.md-logo img,.md-nav__title .md-nav__button.md-logo svg{display:block;width:2.4rem;height:2.4rem}.md-nav__title .md-nav__button.md-logo svg{fill:currentColor}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:0 .6rem}.md-nav__item .md-nav__item{padding-right:0}[dir=rtl] .md-nav__item .md-nav__item{padding-right:.6rem;padding-left:0}.md-nav__link{display:block;margin-top:.625em;overflow:hidden;text-overflow:ellipsis;cursor:pointer;transition:color 125ms;scroll-snap-align:start}html .md-nav__link[for=__toc]{display:none}html .md-nav__link[for=__toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:var(--md-default-fg-color--light)}.md-nav__item .md-nav__link--active{color:var(--md-typeset-a-color)}.md-nav__item--nested>.md-nav__link{color:inherit}.md-nav__link:focus,.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav__source{display:none}@media screen and (max-width: 76.1875em){.md-nav--primary,.md-nav--primary .md-nav{position:absolute;top:0;right:0;left:0;z-index:1;display:flex;flex-direction:column;height:100%;background-color:var(--md-default-bg-color)}.md-nav--primary .md-nav__title,.md-nav--primary .md-nav__item{font-size:.8rem;line-height:1.5}.md-nav--primary .md-nav__title{position:relative;height:5.6rem;padding:3rem .8rem 
.2rem;color:var(--md-default-fg-color--light);font-weight:400;line-height:2.4rem;white-space:nowrap;background-color:var(--md-default-fg-color--lightest);cursor:pointer}.md-nav--primary .md-nav__title .md-nav__icon{position:absolute;top:.4rem;left:.4rem;display:block;width:1.2rem;height:1.2rem;margin:.2rem}.md-nav--primary .md-nav__title .md-nav__icon::after{display:block;width:100%;height:100%;background-color:currentColor;-webkit-mask-image:var(--md-nav-icon--prev);mask-image:var(--md-nav-icon--prev);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-nav--primary .md-nav__title .md-nav__icon{right:.4rem;left:initial}.md-nav--primary .md-nav__title~.md-nav__list{overflow-y:auto;background-color:var(--md-default-bg-color);box-shadow:0 .05rem 0 var(--md-default-fg-color--lightest) inset;-webkit-scroll-snap-type:y mandatory;-ms-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory;touch-action:pan-y}.md-nav--primary .md-nav__title~.md-nav__list>.md-nav__item:first-child{border-top:0}.md-nav--primary .md-nav__title[for=__drawer]{color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color)}.md-nav--primary .md-nav__title[for=__drawer] .md-nav__button{position:absolute;top:.2rem;left:.2rem;display:block;margin:.2rem;padding:.4rem}html [dir=rtl] .md-nav--primary .md-nav__title[for=__drawer] .md-nav__button{right:.2rem;left:initial}.md-nav--primary .md-nav__list{flex:1}.md-nav--primary .md-nav__item{padding:0;border-top:.05rem solid var(--md-default-fg-color--lightest)}[dir=rtl] .md-nav--primary .md-nav__item{padding:0}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:2.4rem}[dir=rtl] .md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:.8rem;padding-left:2.4rem}.md-nav--primary .md-nav__item--active>.md-nav__link{color:var(--md-typeset-a-color)}.md-nav--primary .md-nav__item--active>.md-nav__link:focus,.md-nav--primary .md-nav__item--active>.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav--primary .md-nav__link{position:relative;margin-top:0;padding:.6rem .8rem}.md-nav--primary .md-nav__link .md-nav__icon{position:absolute;top:50%;right:.6rem;width:1.2rem;height:1.2rem;margin-top:-0.6rem;color:inherit;font-size:1.2rem}.md-nav--primary .md-nav__link .md-nav__icon::after{display:block;width:100%;height:100%;background-color:currentColor;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-nav--primary .md-nav__link .md-nav__icon{right:initial;left:.6rem}[dir=rtl] .md-nav--primary .md-nav__icon::after{transform:scale(-1)}.md-nav--primary .md-nav--secondary .md-nav__link{position:static}.md-nav--primary .md-nav--secondary .md-nav{position:static;background-color:transparent}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:1.4rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-right:1.4rem;padding-left:initial}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-right:2rem;padding-left:initial}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:2.6rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-right:2.6rem;padding-left:initial}.md-nav--primary .md-nav--secondary .md-nav .md-nav 
.md-nav .md-nav .md-nav__link{padding-left:3.2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-right:3.2rem;padding-left:initial}.md-nav--secondary{background-color:transparent}.md-nav__toggle~.md-nav{display:flex;transform:translateX(100%);opacity:0;transition:transform 250ms cubic-bezier(0.8, 0, 0.6, 1),opacity 125ms 50ms}[dir=rtl] .md-nav__toggle~.md-nav{transform:translateX(-100%)}.md-nav__toggle:checked~.md-nav{transform:translateX(0);opacity:1;transition:transform 250ms cubic-bezier(0.4, 0, 0.2, 1),opacity 125ms 125ms}.md-nav__toggle:checked~.md-nav>.md-nav__list{-webkit-backface-visibility:hidden;backface-visibility:hidden}}@media screen and (max-width: 59.9375em){html .md-nav__link[for=__toc]{display:block;padding-right:2.4rem}html .md-nav__link[for=__toc]+.md-nav__link{display:none}html .md-nav__link[for=__toc] .md-icon::after{display:block;width:100%;height:100%;-webkit-mask-image:var(--md-toc-icon);mask-image:var(--md-toc-icon);background-color:currentColor;content:""}html .md-nav__link[for=__toc]~.md-nav{display:flex}html [dir=rtl] .md-nav__link{padding-right:.8rem;padding-left:2.4rem}.md-nav__source{display:block;padding:0 .2rem;color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color--dark)}}@media screen and (min-width: 60em){.md-nav--secondary .md-nav__title[for=__toc]{scroll-snap-align:start}.md-nav--secondary .md-nav__title .md-nav__icon{display:none}}@media screen and (min-width: 76.25em){.md-nav{transition:max-height 250ms cubic-bezier(0.86, 0, 0.07, 1)}.md-nav--primary .md-nav__title[for=__drawer]{scroll-snap-align:start}.md-nav--primary .md-nav__title .md-nav__icon{display:none}.md-nav__toggle~.md-nav{display:none}.md-nav__toggle:checked~.md-nav,.md-nav__toggle:indeterminate~.md-nav{display:block}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--section{display:block;margin:1.25em 0}.md-nav__item--section:last-child{margin-bottom:0}.md-nav__item--section>.md-nav__link{display:none}.md-nav__item--section>.md-nav{display:block}.md-nav__item--section>.md-nav>.md-nav__title{display:block;padding:0;pointer-events:none;scroll-snap-align:start}.md-nav__item--section>.md-nav>.md-nav__list>.md-nav__item{padding:0}.md-nav__icon{float:right;width:.9rem;height:.9rem;transition:transform 250ms}[dir=rtl] .md-nav__icon{float:left;transform:rotate(180deg)}.md-nav__icon::after{display:inline-block;width:100%;height:100%;vertical-align:-0.1rem;background-color:currentColor;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link .md-nav__icon,.md-nav__item--nested .md-nav__toggle:indeterminate~.md-nav__link .md-nav__icon{transform:rotate(90deg)}}:root{--md-search-result-icon: url("data:image/svg+xml;charset=utf-8,")}.md-search{position:relative}.no-js .md-search{display:none}@media screen and (min-width: 60em){.md-search{padding:.2rem 0}}.md-search__overlay{z-index:1;opacity:0}@media screen and (max-width: 59.9375em){.md-search__overlay{position:absolute;top:.2rem;left:-2.2rem;width:2rem;height:2rem;overflow:hidden;background-color:var(--md-default-bg-color);border-radius:1rem;transform-origin:center;transition:transform 300ms 100ms,opacity 200ms 200ms;pointer-events:none}[dir=rtl] .md-search__overlay{right:-2.2rem;left:initial}[data-md-toggle=search]:checked~.md-header 
.md-search__overlay{opacity:1;transition:transform 400ms,opacity 100ms}}@media screen and (max-width: 29.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(45)}}@media screen and (min-width: 30em)and (max-width: 44.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(60)}}@media screen and (min-width: 45em)and (max-width: 59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(75)}}@media screen and (min-width: 60em){.md-search__overlay{position:fixed;top:0;left:0;width:0;height:0;background-color:rgba(0,0,0,.54);cursor:pointer;transition:width 0ms 250ms,height 0ms 250ms,opacity 250ms}[dir=rtl] .md-search__overlay{right:0;left:initial}[data-md-toggle=search]:checked~.md-header .md-search__overlay{width:100%;height:100%;opacity:1;transition:width 0ms,height 0ms,opacity 250ms}}.md-search__inner{-webkit-backface-visibility:hidden;backface-visibility:hidden}@media screen and (max-width: 59.9375em){.md-search__inner{position:fixed;top:0;left:100%;z-index:2;width:100%;height:100%;transform:translateX(5%);opacity:0;transition:right 0ms 300ms,left 0ms 300ms,transform 150ms 150ms cubic-bezier(0.4, 0, 0.2, 1),opacity 150ms 150ms}[data-md-toggle=search]:checked~.md-header .md-search__inner{left:0;transform:translateX(0);opacity:1;transition:right 0ms 0ms,left 0ms 0ms,transform 150ms 150ms cubic-bezier(0.1, 0.7, 0.1, 1),opacity 150ms 150ms}[dir=rtl] [data-md-toggle=search]:checked~.md-header .md-search__inner{right:0;left:initial}html [dir=rtl] .md-search__inner{right:100%;left:initial;transform:translateX(-5%)}}@media screen and (min-width: 60em){.md-search__inner{position:relative;float:right;width:11.7rem;padding:.1rem 0;transition:width 250ms cubic-bezier(0.1, 0.7, 0.1, 1)}[dir=rtl] .md-search__inner{float:left}}@media screen and (min-width: 60em)and (max-width: 76.1875em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:23.4rem}}@media screen and (min-width: 76.25em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:34.4rem}}.md-search__form{position:relative;z-index:2;height:2.4rem;background-color:var(--md-default-bg-color);transition:color 250ms,background-color 250ms}@media screen and (min-width: 60em){.md-search__form{height:1.8rem;background-color:rgba(0,0,0,.26);border-radius:.1rem}.md-search__form:hover{background-color:rgba(255,255,255,.12)}}[data-md-toggle=search]:checked~.md-header .md-search__form{background-color:var(--md-default-bg-color);border-radius:.1rem .1rem 0 0}.md-search__input{position:relative;z-index:2;width:100%;height:100%;padding:0 2.2rem 0 3.6rem;font-size:.9rem;text-overflow:ellipsis;background:transparent}[dir=rtl] .md-search__input{padding:0 3.6rem 0 2.2rem}.md-search__input::-webkit-input-placeholder{-webkit-transition:color 250ms;transition:color 250ms}.md-search__input::-moz-placeholder{-moz-transition:color 250ms;transition:color 250ms}.md-search__input::-ms-input-placeholder{-ms-transition:color 250ms;transition:color 250ms}.md-search__input::placeholder{transition:color 250ms}.md-search__input::-webkit-input-placeholder{color:var(--md-default-fg-color--light)}.md-search__input::-moz-placeholder{color:var(--md-default-fg-color--light)}.md-search__input::-ms-input-placeholder{color:var(--md-default-fg-color--light)}.md-search__input~.md-search__icon,.md-search__input::placeholder{color:var(--md-default-fg-color--light)}.md-search__input::-ms-clear{display:none}@media screen and (min-width: 
60em){.md-search__input{padding-left:2.2rem;color:inherit;font-size:.8rem}[dir=rtl] .md-search__input{padding-right:2.2rem}.md-search__input+.md-search__icon{color:var(--md-primary-bg-color)}.md-search__input::-webkit-input-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::-moz-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::-ms-input-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::placeholder{color:var(--md-primary-bg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input{color:var(--md-default-fg-color);text-overflow:clip}[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input::-moz-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input::-ms-input-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:var(--md-default-fg-color--light)}}.md-search__suggest{position:absolute;top:0;display:flex;align-items:center;width:100%;height:100%;padding:0 2.2rem 0 3.6rem;color:var(--md-default-fg-color--lighter);font-size:.9rem;white-space:nowrap;opacity:0;transition:opacity 50ms}[dir=rtl] .md-search__suggest{padding:0 3.6rem 0 2.2rem}@media screen and (min-width: 60em){.md-search__suggest{padding-left:2.2rem;font-size:.8rem}[dir=rtl] .md-search__suggest{padding-right:2.2rem}}[data-md-toggle=search]:checked~.md-header .md-search__suggest{opacity:1;transition:opacity 300ms 100ms}.md-search__icon{position:absolute;z-index:2;width:1.2rem;height:1.2rem;cursor:pointer;transition:color 250ms,opacity 250ms}.md-search__icon:hover{opacity:.7}.md-search__icon[for=__search]{top:.3rem;left:.5rem}[dir=rtl] .md-search__icon[for=__search]{right:.5rem;left:initial}[dir=rtl] .md-search__icon[for=__search] svg{transform:scaleX(-1)}@media screen and (max-width: 59.9375em){.md-search__icon[for=__search]{top:.6rem;left:.8rem}[dir=rtl] .md-search__icon[for=__search]{right:.8rem;left:initial}.md-search__icon[for=__search] svg:first-child{display:none}}@media screen and (min-width: 60em){.md-search__icon[for=__search]{pointer-events:none}.md-search__icon[for=__search] svg:last-child{display:none}}.md-search__icon[type=reset]{top:.3rem;right:.5rem;transform:scale(0.75);opacity:0;transition:transform 150ms cubic-bezier(0.1, 0.7, 0.1, 1),opacity 150ms;pointer-events:none}[dir=rtl] .md-search__icon[type=reset]{right:initial;left:.5rem}@media screen and (max-width: 59.9375em){.md-search__icon[type=reset]{top:.6rem;right:.8rem}[dir=rtl] .md-search__icon[type=reset]{right:initial;left:.8rem}}[data-md-toggle=search]:checked~.md-header .md-search__input:not(:-moz-placeholder-shown)~.md-search__icon[type=reset]{transform:scale(1);opacity:1;pointer-events:initial}[data-md-toggle=search]:checked~.md-header .md-search__input:not(:placeholder-shown)~.md-search__icon[type=reset]{transform:scale(1);opacity:1;pointer-events:initial}[data-md-toggle=search]:checked~.md-header .md-search__input:not(:-moz-placeholder-shown)~.md-search__icon[type=reset]:hover{opacity:.7}[data-md-toggle=search]:checked~.md-header .md-search__input:not(:placeholder-shown)~.md-search__icon[type=reset]:hover{opacity:.7}.md-search__output{position:absolute;z-index:1;width:100%;overflow:hidden;border-radius:0 0 .1rem .1rem}@media screen and 
(max-width: 59.9375em){.md-search__output{top:2.4rem;bottom:0}}@media screen and (min-width: 60em){.md-search__output{top:1.9rem;opacity:0;transition:opacity 400ms}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}}.md-search__scrollwrap{height:100%;overflow-y:auto;background-color:var(--md-default-bg-color);-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-scroll-snap-type:y mandatory;-ms-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory;touch-action:pan-y}@media(-webkit-max-device-pixel-ratio: 1), (max-resolution: 1dppx){.md-search__scrollwrap{transform:translateZ(0)}}@media screen and (min-width: 60em)and (max-width: 76.1875em){.md-search__scrollwrap{width:23.4rem}}@media screen and (min-width: 76.25em){.md-search__scrollwrap{width:34.4rem}}@media screen and (min-width: 60em){.md-search__scrollwrap{max-height:0;scrollbar-width:thin;scrollbar-color:var(--md-default-fg-color--lighter) transparent}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-search__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}}.md-search-result{color:var(--md-default-fg-color);word-break:break-word}.md-search-result__meta{padding:0 .8rem;color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.8rem;background-color:var(--md-default-fg-color--lightest);scroll-snap-align:start}@media screen and (min-width: 60em){.md-search-result__meta{padding-left:2.2rem}[dir=rtl] .md-search-result__meta{padding-right:2.2rem;padding-left:initial}}.md-search-result__list{margin:0;padding:0;list-style:none}.md-search-result__item{box-shadow:0 -0.05rem 0 var(--md-default-fg-color--lightest)}.md-search-result__item:first-child{box-shadow:none}.md-search-result__link{display:block;outline:none;transition:background-color 250ms;scroll-snap-align:start}.md-search-result__link:focus,.md-search-result__link:hover{background-color:var(--md-accent-fg-color--transparent)}.md-search-result__link:focus .md-search-result__article::before,.md-search-result__link:hover .md-search-result__article::before{opacity:.7}.md-search-result__link:last-child p:last-child{margin-bottom:.6rem}.md-search-result__more summary{display:block;padding:.75em .8rem;color:var(--md-typeset-a-color);font-size:.64rem;outline:0;cursor:pointer;transition:color 250ms,background-color 250ms;scroll-snap-align:start}.md-search-result__more summary:focus,.md-search-result__more summary:hover{color:var(--md-accent-fg-color);background-color:var(--md-accent-fg-color--transparent)}@media screen and (min-width: 60em){.md-search-result__more summary{padding-left:2.2rem}[dir=rtl] .md-search-result__more summary{padding-right:2.2rem;padding-left:.8rem}}.md-search-result__more summary::-webkit-details-marker{display:none}.md-search-result__more summary~*>*{opacity:.65}.md-search-result__article{position:relative;padding:0 .8rem;overflow:hidden}@media screen and (min-width: 60em){.md-search-result__article{padding-left:2.2rem}[dir=rtl] .md-search-result__article{padding-right:2.2rem;padding-left:.8rem}}.md-search-result__article--document .md-search-result__title{margin:.55rem 
0;font-weight:400;font-size:.8rem;line-height:1.4}.md-search-result__icon{position:absolute;left:0;width:1.2rem;height:1.2rem;margin:.5rem;color:var(--md-default-fg-color--light)}.md-search-result__icon::after{display:inline-block;width:100%;height:100%;background-color:currentColor;-webkit-mask-image:var(--md-search-result-icon);mask-image:var(--md-search-result-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-search-result__icon{right:0;left:initial}[dir=rtl] .md-search-result__icon::after{transform:scaleX(-1)}@media screen and (max-width: 59.9375em){.md-search-result__icon{display:none}}.md-search-result__title{margin:.5em 0;font-weight:700;font-size:.64rem;line-height:1.6}.md-search-result__teaser{display:-webkit-box;max-height:2rem;margin:.5em 0;overflow:hidden;color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.6;text-overflow:ellipsis;-webkit-box-orient:vertical;-webkit-line-clamp:2}@media screen and (max-width: 44.9375em){.md-search-result__teaser{max-height:3rem;-webkit-line-clamp:3}}@media screen and (min-width: 60em)and (max-width: 76.1875em){.md-search-result__teaser{max-height:3rem;-webkit-line-clamp:3}}.md-search-result__teaser mark{text-decoration:underline;background-color:transparent}.md-search-result__terms{margin:.5em 0;font-size:.64rem;font-style:italic}.md-search-result mark{color:var(--md-accent-fg-color);background-color:transparent}@-webkit-keyframes md-sidebar__scrollwrap--hack{0%,99%{-webkit-scroll-snap-type:none;scroll-snap-type:none}100%{-webkit-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory}}@keyframes md-sidebar__scrollwrap--hack{0%,99%{-webkit-scroll-snap-type:none;-ms-scroll-snap-type:none;scroll-snap-type:none}100%{-webkit-scroll-snap-type:y mandatory;-ms-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory}}.md-sidebar{position:-webkit-sticky;position:sticky;top:2.4rem;flex-shrink:0;align-self:flex-start;width:12.1rem;height:0;padding:1.2rem 0}@media print{.md-sidebar{display:none}}@media screen and (max-width: 76.1875em){.md-sidebar--primary{position:fixed;top:0;left:-12.1rem;z-index:3;display:block;width:12.1rem;height:100%;background-color:var(--md-default-bg-color);transform:translateX(0);transition:transform 250ms cubic-bezier(0.4, 0, 0.2, 1),box-shadow 250ms}[dir=rtl] .md-sidebar--primary{right:-12.1rem;left:initial}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);transform:translateX(12.1rem)}[dir=rtl] [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{transform:translateX(-12.1rem)}.md-sidebar--primary .md-sidebar__scrollwrap{overflow:hidden}}@media screen and (min-width: 76.25em){.md-sidebar{height:0}.no-js .md-sidebar{height:auto}}.md-sidebar--secondary{display:none;order:2}@media screen and (min-width: 60em){.md-sidebar--secondary{height:0}.md-sidebar--secondary:not([hidden]){display:block}.no-js .md-sidebar--secondary{height:auto}.md-sidebar--secondary .md-sidebar__scrollwrap{touch-action:pan-y}}.md-sidebar__scrollwrap{margin:0 .2rem;overflow-y:auto;-webkit-backface-visibility:hidden;backface-visibility:hidden;scrollbar-width:thin;scrollbar-color:var(--md-default-fg-color--lighter) transparent}.js .md-sidebar__scrollwrap{-webkit-animation:md-sidebar__scrollwrap--hack 1000ms forwards;animation:md-sidebar__scrollwrap--hack 1000ms forwards}@media screen and (max-width: 76.1875em){.md-sidebar--primary 
.md-sidebar__scrollwrap{position:absolute;top:0;right:0;bottom:0;left:0;margin:0;-webkit-scroll-snap-type:none;-ms-scroll-snap-type:none;scroll-snap-type:none}}.md-sidebar__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}@-webkit-keyframes md-source__facts--done{0%{height:0}100%{height:.65rem}}@keyframes md-source__facts--done{0%{height:0}100%{height:.65rem}}@-webkit-keyframes md-source__fact--done{0%{transform:translateY(100%);opacity:0}50%{opacity:0}100%{transform:translateY(0%);opacity:1}}@keyframes md-source__fact--done{0%{transform:translateY(100%);opacity:0}50%{opacity:0}100%{transform:translateY(0%);opacity:1}}.md-source{display:block;font-size:.65rem;line-height:1.2;white-space:nowrap;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:opacity 250ms}.md-source:hover{opacity:.7}.md-source__icon{display:inline-block;width:2.4rem;height:2.4rem;vertical-align:middle}.md-source__icon svg{margin-top:.6rem;margin-left:.6rem}[dir=rtl] .md-source__icon svg{margin-right:.6rem;margin-left:initial}.md-source__icon+.md-source__repository{margin-left:-2rem;padding-left:2rem}[dir=rtl] .md-source__icon+.md-source__repository{margin-right:-2rem;margin-left:initial;padding-right:2rem;padding-left:initial}.md-source__repository{display:inline-block;max-width:calc(100% - 1.2rem);margin-left:.6rem;overflow:hidden;font-weight:700;text-overflow:ellipsis;vertical-align:middle}.md-source__facts{margin:0;padding:0;overflow:hidden;font-weight:700;font-size:.55rem;list-style-type:none;opacity:.75}[data-md-state=done] .md-source__facts{-webkit-animation:md-source__facts--done 250ms ease-in;animation:md-source__facts--done 250ms ease-in}.md-source__fact{float:left}[dir=rtl] .md-source__fact{float:right}[data-md-state=done] .md-source__fact{-webkit-animation:md-source__fact--done 400ms ease-out;animation:md-source__fact--done 400ms ease-out}.md-source__fact::before{margin:0 .1rem;content:"·"}.md-source__fact:first-child::before{display:none}.md-tabs{width:100%;overflow:auto;color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color);transition:background-color 250ms}.no-js .md-tabs{transition:none}@media screen and (max-width: 76.1875em){.md-tabs{display:none}}@media print{.md-tabs{display:none}}.md-tabs__list{margin:0;margin-left:.2rem;padding:0;white-space:nowrap;list-style:none;contain:content}[dir=rtl] .md-tabs__list{margin-right:.2rem;margin-left:initial}.md-tabs__item{display:inline-block;height:2.4rem;padding-right:.6rem;padding-left:.6rem}.md-tabs__link{display:block;margin-top:.8rem;font-size:.7rem;opacity:.7;transition:transform 400ms cubic-bezier(0.1, 0.7, 0.1, 1),opacity 250ms}.no-js .md-tabs__link{transition:none}.md-tabs__link--active,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) .md-tabs__link{transition-delay:20ms}.md-tabs__item:nth-child(3) .md-tabs__link{transition-delay:40ms}.md-tabs__item:nth-child(4) .md-tabs__link{transition-delay:60ms}.md-tabs__item:nth-child(5) .md-tabs__link{transition-delay:80ms}.md-tabs__item:nth-child(6) .md-tabs__link{transition-delay:100ms}.md-tabs__item:nth-child(7) .md-tabs__link{transition-delay:120ms}.md-tabs__item:nth-child(8) .md-tabs__link{transition-delay:140ms}.md-tabs__item:nth-child(9) 
.md-tabs__link{transition-delay:160ms}.md-tabs__item:nth-child(10) .md-tabs__link{transition-delay:180ms}.md-tabs__item:nth-child(11) .md-tabs__link{transition-delay:200ms}.md-tabs__item:nth-child(12) .md-tabs__link{transition-delay:220ms}.md-tabs__item:nth-child(13) .md-tabs__link{transition-delay:240ms}.md-tabs__item:nth-child(14) .md-tabs__link{transition-delay:260ms}.md-tabs__item:nth-child(15) .md-tabs__link{transition-delay:280ms}.md-tabs__item:nth-child(16) .md-tabs__link{transition-delay:300ms}.md-tabs[data-md-state=hidden]{pointer-events:none}.md-tabs[data-md-state=hidden] .md-tabs__link{transform:translateY(50%);opacity:0;transition:color 250ms,transform 0ms 400ms,opacity 100ms}@media screen and (min-width: 76.25em){.md-tabs~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__title{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--active{display:block;padding:0}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--active>.md-nav__link{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--active>.md-nav>.md-nav__title{display:block;padding:0 .6rem;pointer-events:none;scroll-snap-align:start}.md-tabs--active~.md-main .md-nav[data-md-level="1"]{display:block}}:root{--md-version-icon: url( "data:image/svg+xml;charset=utf-8," )}.md-version{flex-shrink:0;height:2.4rem;font-size:.8rem}.md-version__current{position:relative;top:.05rem;margin-right:.4rem;margin-left:1.4rem}[dir=rtl] .md-version__current{margin-right:1.4rem;margin-left:.4rem}.md-version__current::after{display:inline-block;width:.4rem;height:.6rem;margin-left:.4rem;background-color:currentColor;-webkit-mask-image:var(--md-version-icon);mask-image:var(--md-version-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;content:""}[dir=rtl] .md-version__current::after{margin-right:.4rem;margin-left:initial}.md-version__list{position:absolute;top:.05rem;max-height:2rem;margin:.2rem .8rem;padding:0;overflow:scroll;color:var(--md-default-fg-color);list-style-type:none;background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 0 .05rem rgba(0,0,0,.25);opacity:0;transition:opacity 250ms 250ms,max-height 0ms 500ms;-webkit-scroll-snap-type:y mandatory;-ms-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory}.md-version__list:focus-within,.md-version__list:hover{max-height:10rem;opacity:1;transition:opacity 250ms,max-height 250ms}.md-version__item{line-height:2rem}.md-version__link{display:block;width:100%;padding-right:1.2rem;padding-left:.6rem;cursor:pointer;transition:background-color 250ms,color 250ms;scroll-snap-align:start}[dir=rtl] .md-version__link{padding-right:.6rem;padding-left:1.2rem}.md-version__link:focus,.md-version__link:hover{background-color:var(--md-default-fg-color--lightest)}:root{--md-admonition-icon--note: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--abstract: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--info: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--tip: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--success: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--question: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--warning: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--failure: 
url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--danger: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--bug: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--example: url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--quote: url("data:image/svg+xml;charset=utf-8,")}.md-typeset .admonition,.md-typeset details{margin:1.5625em 0;padding:0 .6rem;overflow:hidden;color:var(--md-admonition-fg-color);font-size:.64rem;page-break-inside:avoid;background-color:var(--md-admonition-bg-color);border-left:.2rem solid #448aff;border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .05rem rgba(0,0,0,.1)}[dir=rtl] .md-typeset .admonition,[dir=rtl] .md-typeset details{border-right:.2rem solid #448aff;border-left:none}@media print{.md-typeset .admonition,.md-typeset details{box-shadow:none}}html .md-typeset .admonition>:last-child,html .md-typeset details>:last-child{margin-bottom:.6rem}.md-typeset .admonition .admonition,.md-typeset details .admonition,.md-typeset .admonition details,.md-typeset details details{margin:1em 0}.md-typeset .admonition .md-typeset__scrollwrap,.md-typeset details .md-typeset__scrollwrap{margin:1em -0.6rem}.md-typeset .admonition .md-typeset__table,.md-typeset details .md-typeset__table{padding:0 .6rem}.md-typeset .admonition>.tabbed-set:only-child,.md-typeset details>.tabbed-set:only-child{margin-top:0}.md-typeset .admonition-title,.md-typeset summary{position:relative;margin:0 -0.6rem 0 -0.8rem;padding:.4rem .6rem .4rem 2rem;font-weight:700;background-color:rgba(68,138,255,.1);border-left:.2rem solid #448aff}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{margin:0 -0.8rem 0 -0.6rem;padding:.4rem 2rem .4rem .6rem}html .md-typeset .admonition-title:last-child,html .md-typeset summary:last-child{margin-bottom:0}.md-typeset .admonition-title::before,.md-typeset summary::before{position:absolute;left:.6rem;width:1rem;height:1rem;background-color:#448aff;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-typeset .admonition-title::before,[dir=rtl] .md-typeset summary::before{right:.8rem;left:initial}.md-typeset .admonition-title code,.md-typeset summary code{margin:initial;padding:initial;color:currentColor;background-color:transparent;border-radius:initial;box-shadow:none}.md-typeset .admonition-title+.tabbed-set:last-child,.md-typeset summary+.tabbed-set:last-child{margin-top:0}.md-typeset .admonition.note,.md-typeset details.note{border-color:#448aff}.md-typeset .note>.admonition-title,.md-typeset .note>summary{background-color:rgba(68,138,255,.1);border-color:#448aff}.md-typeset .note>.admonition-title::before,.md-typeset .note>summary::before{background-color:#448aff;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.abstract,.md-typeset details.abstract,.md-typeset .admonition.tldr,.md-typeset details.tldr,.md-typeset .admonition.summary,.md-typeset details.summary{border-color:#00b0ff}.md-typeset .abstract>.admonition-title,.md-typeset .abstract>summary,.md-typeset .tldr>.admonition-title,.md-typeset .tldr>summary,.md-typeset .summary>.admonition-title,.md-typeset .summary>summary{background-color:rgba(0,176,255,.1);border-color:#00b0ff}.md-typeset 
.abstract>.admonition-title::before,.md-typeset .abstract>summary::before,.md-typeset .tldr>.admonition-title::before,.md-typeset .tldr>summary::before,.md-typeset .summary>.admonition-title::before,.md-typeset .summary>summary::before{background-color:#00b0ff;-webkit-mask-image:var(--md-admonition-icon--abstract);mask-image:var(--md-admonition-icon--abstract);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.info,.md-typeset details.info,.md-typeset .admonition.todo,.md-typeset details.todo{border-color:#00b8d4}.md-typeset .info>.admonition-title,.md-typeset .info>summary,.md-typeset .todo>.admonition-title,.md-typeset .todo>summary{background-color:rgba(0,184,212,.1);border-color:#00b8d4}.md-typeset .info>.admonition-title::before,.md-typeset .info>summary::before,.md-typeset .todo>.admonition-title::before,.md-typeset .todo>summary::before{background-color:#00b8d4;-webkit-mask-image:var(--md-admonition-icon--info);mask-image:var(--md-admonition-icon--info);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.tip,.md-typeset details.tip,.md-typeset .admonition.important,.md-typeset details.important,.md-typeset .admonition.hint,.md-typeset details.hint{border-color:#00bfa5}.md-typeset .tip>.admonition-title,.md-typeset .tip>summary,.md-typeset .important>.admonition-title,.md-typeset .important>summary,.md-typeset .hint>.admonition-title,.md-typeset .hint>summary{background-color:rgba(0,191,165,.1);border-color:#00bfa5}.md-typeset .tip>.admonition-title::before,.md-typeset .tip>summary::before,.md-typeset .important>.admonition-title::before,.md-typeset .important>summary::before,.md-typeset .hint>.admonition-title::before,.md-typeset .hint>summary::before{background-color:#00bfa5;-webkit-mask-image:var(--md-admonition-icon--tip);mask-image:var(--md-admonition-icon--tip);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.success,.md-typeset details.success,.md-typeset .admonition.done,.md-typeset details.done,.md-typeset .admonition.check,.md-typeset details.check{border-color:#00c853}.md-typeset .success>.admonition-title,.md-typeset .success>summary,.md-typeset .done>.admonition-title,.md-typeset .done>summary,.md-typeset .check>.admonition-title,.md-typeset .check>summary{background-color:rgba(0,200,83,.1);border-color:#00c853}.md-typeset .success>.admonition-title::before,.md-typeset .success>summary::before,.md-typeset .done>.admonition-title::before,.md-typeset .done>summary::before,.md-typeset .check>.admonition-title::before,.md-typeset .check>summary::before{background-color:#00c853;-webkit-mask-image:var(--md-admonition-icon--success);mask-image:var(--md-admonition-icon--success);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.question,.md-typeset details.question,.md-typeset .admonition.faq,.md-typeset details.faq,.md-typeset .admonition.help,.md-typeset details.help{border-color:#64dd17}.md-typeset .question>.admonition-title,.md-typeset .question>summary,.md-typeset .faq>.admonition-title,.md-typeset .faq>summary,.md-typeset .help>.admonition-title,.md-typeset .help>summary{background-color:rgba(100,221,23,.1);border-color:#64dd17}.md-typeset .question>.admonition-title::before,.md-typeset .question>summary::before,.md-typeset .faq>.admonition-title::before,.md-typeset .faq>summary::before,.md-typeset 
.help>.admonition-title::before,.md-typeset .help>summary::before{background-color:#64dd17;-webkit-mask-image:var(--md-admonition-icon--question);mask-image:var(--md-admonition-icon--question);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.warning,.md-typeset details.warning,.md-typeset .admonition.attention,.md-typeset details.attention,.md-typeset .admonition.caution,.md-typeset details.caution{border-color:#ff9100}.md-typeset .warning>.admonition-title,.md-typeset .warning>summary,.md-typeset .attention>.admonition-title,.md-typeset .attention>summary,.md-typeset .caution>.admonition-title,.md-typeset .caution>summary{background-color:rgba(255,145,0,.1);border-color:#ff9100}.md-typeset .warning>.admonition-title::before,.md-typeset .warning>summary::before,.md-typeset .attention>.admonition-title::before,.md-typeset .attention>summary::before,.md-typeset .caution>.admonition-title::before,.md-typeset .caution>summary::before{background-color:#ff9100;-webkit-mask-image:var(--md-admonition-icon--warning);mask-image:var(--md-admonition-icon--warning);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.failure,.md-typeset details.failure,.md-typeset .admonition.missing,.md-typeset details.missing,.md-typeset .admonition.fail,.md-typeset details.fail{border-color:#ff5252}.md-typeset .failure>.admonition-title,.md-typeset .failure>summary,.md-typeset .missing>.admonition-title,.md-typeset .missing>summary,.md-typeset .fail>.admonition-title,.md-typeset .fail>summary{background-color:rgba(255,82,82,.1);border-color:#ff5252}.md-typeset .failure>.admonition-title::before,.md-typeset .failure>summary::before,.md-typeset .missing>.admonition-title::before,.md-typeset .missing>summary::before,.md-typeset .fail>.admonition-title::before,.md-typeset .fail>summary::before{background-color:#ff5252;-webkit-mask-image:var(--md-admonition-icon--failure);mask-image:var(--md-admonition-icon--failure);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.danger,.md-typeset details.danger,.md-typeset .admonition.error,.md-typeset details.error{border-color:#ff1744}.md-typeset .danger>.admonition-title,.md-typeset .danger>summary,.md-typeset .error>.admonition-title,.md-typeset .error>summary{background-color:rgba(255,23,68,.1);border-color:#ff1744}.md-typeset .danger>.admonition-title::before,.md-typeset .danger>summary::before,.md-typeset .error>.admonition-title::before,.md-typeset .error>summary::before{background-color:#ff1744;-webkit-mask-image:var(--md-admonition-icon--danger);mask-image:var(--md-admonition-icon--danger);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.bug,.md-typeset details.bug{border-color:#f50057}.md-typeset .bug>.admonition-title,.md-typeset .bug>summary{background-color:rgba(245,0,87,.1);border-color:#f50057}.md-typeset .bug>.admonition-title::before,.md-typeset .bug>summary::before{background-color:#f50057;-webkit-mask-image:var(--md-admonition-icon--bug);mask-image:var(--md-admonition-icon--bug);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.example,.md-typeset details.example{border-color:#651fff}.md-typeset .example>.admonition-title,.md-typeset .example>summary{background-color:rgba(101,31,255,.1);border-color:#651fff}.md-typeset 
.example>.admonition-title::before,.md-typeset .example>summary::before{background-color:#651fff;-webkit-mask-image:var(--md-admonition-icon--example);mask-image:var(--md-admonition-icon--example);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.quote,.md-typeset details.quote,.md-typeset .admonition.cite,.md-typeset details.cite{border-color:#9e9e9e}.md-typeset .quote>.admonition-title,.md-typeset .quote>summary,.md-typeset .cite>.admonition-title,.md-typeset .cite>summary{background-color:rgba(158,158,158,.1);border-color:#9e9e9e}.md-typeset .quote>.admonition-title::before,.md-typeset .quote>summary::before,.md-typeset .cite>.admonition-title::before,.md-typeset .cite>summary::before{background-color:#9e9e9e;-webkit-mask-image:var(--md-admonition-icon--quote);mask-image:var(--md-admonition-icon--quote);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.codehilite .o,.highlight .o,.codehilite .ow,.highlight .ow{color:var(--md-code-hl-operator-color)}.codehilite .p,.highlight .p{color:var(--md-code-hl-punctuation-color)}.codehilite .cpf,.highlight .cpf,.codehilite .l,.highlight .l,.codehilite .s,.highlight .s,.codehilite .sb,.highlight .sb,.codehilite .sc,.highlight .sc,.codehilite .s2,.highlight .s2,.codehilite .si,.highlight .si,.codehilite .s1,.highlight .s1,.codehilite .ss,.highlight .ss{color:var(--md-code-hl-string-color)}.codehilite .cp,.highlight .cp,.codehilite .se,.highlight .se,.codehilite .sh,.highlight .sh,.codehilite .sr,.highlight .sr,.codehilite .sx,.highlight .sx{color:var(--md-code-hl-special-color)}.codehilite .m,.highlight .m,.codehilite .mf,.highlight .mf,.codehilite .mh,.highlight .mh,.codehilite .mi,.highlight .mi,.codehilite .il,.highlight .il,.codehilite .mo,.highlight .mo{color:var(--md-code-hl-number-color)}.codehilite .k,.highlight .k,.codehilite .kd,.highlight .kd,.codehilite .kn,.highlight .kn,.codehilite .kp,.highlight .kp,.codehilite .kr,.highlight .kr,.codehilite .kt,.highlight .kt{color:var(--md-code-hl-keyword-color)}.codehilite .kc,.highlight .kc,.codehilite .n,.highlight .n{color:var(--md-code-hl-name-color)}.codehilite .no,.highlight .no,.codehilite .nb,.highlight .nb,.codehilite .bp,.highlight .bp{color:var(--md-code-hl-constant-color)}.codehilite .nc,.highlight .nc,.codehilite .ne,.highlight .ne,.codehilite .nf,.highlight .nf,.codehilite .nn,.highlight .nn{color:var(--md-code-hl-function-color)}.codehilite .nd,.highlight .nd,.codehilite .ni,.highlight .ni,.codehilite .nl,.highlight .nl,.codehilite .nt,.highlight .nt{color:var(--md-code-hl-keyword-color)}.codehilite .c,.highlight .c,.codehilite .cm,.highlight .cm,.codehilite .c1,.highlight .c1,.codehilite .ch,.highlight .ch,.codehilite .cs,.highlight .cs,.codehilite .sd,.highlight .sd{color:var(--md-code-hl-comment-color)}.codehilite .na,.highlight .na,.codehilite .nv,.highlight .nv,.codehilite .vc,.highlight .vc,.codehilite .vg,.highlight .vg,.codehilite .vi,.highlight .vi{color:var(--md-code-hl-variable-color)}.codehilite .ge,.highlight .ge,.codehilite .gr,.highlight .gr,.codehilite .gh,.highlight .gh,.codehilite .go,.highlight .go,.codehilite .gp,.highlight .gp,.codehilite .gs,.highlight .gs,.codehilite .gu,.highlight .gu,.codehilite .gt,.highlight .gt{color:var(--md-code-hl-generic-color)}.codehilite .gd,.highlight .gd,.codehilite .gi,.highlight .gi{margin:0 -0.125em;padding:0 .125em;border-radius:.1rem}.codehilite .gd,.highlight 
.gd{background-color:var(--md-typeset-del-color)}.codehilite .gi,.highlight .gi{background-color:var(--md-typeset-ins-color)}.codehilite .hll,.highlight .hll{display:block;margin:0 -1.1764705882em;padding:0 1.1764705882em;background-color:var(--md-code-hl-color)}.codehilitetable,.highlighttable{display:block;overflow:hidden}.codehilitetable tbody,.highlighttable tbody,.codehilitetable td,.highlighttable td{display:block;padding:0}.codehilitetable tr,.highlighttable tr{display:flex}.codehilitetable pre,.highlighttable pre{margin:0}.codehilitetable .linenos,.highlighttable .linenos{padding:.7720588235em 1.1764705882em;padding-right:0;font-size:.85em;background-color:var(--md-code-bg-color);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.codehilitetable .linenodiv,.highlighttable .linenodiv{padding-right:.5882352941em;box-shadow:-0.05rem 0 var(--md-default-fg-color--lighter) inset}.codehilitetable .linenodiv pre,.highlighttable .linenodiv pre{color:var(--md-default-fg-color--light);text-align:right}.codehilitetable .code,.highlighttable .code{flex:1;overflow:hidden}.md-typeset .codehilitetable,.md-typeset .highlighttable{margin:1em 0;direction:ltr;border-radius:.1rem}.md-typeset .codehilitetable code,.md-typeset .highlighttable code{border-radius:0}@media screen and (max-width: 44.9375em){.md-typeset>.codehilite,.md-typeset>.highlight{margin:1em -0.8rem}.md-typeset>.codehilite .hll,.md-typeset>.highlight .hll{margin:0 -0.8rem;padding:0 .8rem}.md-typeset>.codehilite code,.md-typeset>.highlight code{border-radius:0}.md-typeset>.codehilitetable,.md-typeset>.highlighttable{margin:1em -0.8rem;border-radius:0}.md-typeset>.codehilitetable .hll,.md-typeset>.highlighttable .hll{margin:0 -0.8rem;padding:0 .8rem}}:root{--md-footnotes-icon: url("data:image/svg+xml;charset=utf-8,")}.md-typeset [id^="fnref:"]{display:inline-block}.md-typeset .footnote{color:var(--md-default-fg-color--light);font-size:.64rem}.md-typeset .footnote ol{margin-left:0}.md-typeset .footnote li{transition:color 125ms}.md-typeset .footnote li:target{color:var(--md-default-fg-color)}.md-typeset .footnote li :first-child{margin-top:0}.md-typeset .footnote li:hover .footnote-backref,.md-typeset .footnote li:target .footnote-backref{transform:translateX(0);opacity:1}.md-typeset .footnote li:hover .footnote-backref:hover{color:var(--md-accent-fg-color)}.md-typeset .footnote-ref{display:inline-block;pointer-events:initial}.md-typeset .footnote-backref{display:inline-block;color:var(--md-typeset-a-color);font-size:0;vertical-align:text-bottom;transform:translateX(0.25rem);opacity:0;transition:color 250ms,transform 250ms 250ms,opacity 125ms 250ms}[dir=rtl] .md-typeset .footnote-backref{transform:translateX(-0.25rem)}.md-typeset .footnote-backref::before{display:inline-block;width:.8rem;height:.8rem;background-color:currentColor;-webkit-mask-image:var(--md-footnotes-icon);mask-image:var(--md-footnotes-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-typeset .footnote-backref::before svg{transform:scaleX(-1)}@media print{.md-typeset .footnote-backref{color:var(--md-typeset-a-color);transform:translateX(0);opacity:1}}.md-typeset .headerlink{display:inline-block;margin-left:.5rem;visibility:hidden;opacity:0;transition:color 250ms,visibility 0ms 500ms,opacity 125ms}[dir=rtl] .md-typeset .headerlink{margin-right:.5rem;margin-left:initial}html body .md-typeset .headerlink{color:var(--md-default-fg-color--lighter)}@media 
print{.md-typeset .headerlink{display:none}}.md-typeset :hover>.headerlink,.md-typeset :target>.headerlink,.md-typeset .headerlink:focus{visibility:visible;opacity:1;transition:color 250ms,visibility 0ms,opacity 125ms}.md-typeset :target>.headerlink,.md-typeset .headerlink:focus,.md-typeset .headerlink:hover{color:var(--md-accent-fg-color)}.md-typeset :target{scroll-margin-top:3.6rem}.md-typeset h3:target,.md-typeset h2:target,.md-typeset h1:target{scroll-margin-top:3.4rem}.md-typeset h4:target{scroll-margin-top:3.45rem}.md-typeset div.arithmatex{overflow-x:scroll}@media screen and (max-width: 44.9375em){.md-typeset div.arithmatex{margin:0 -0.8rem}}.md-typeset div.arithmatex>*{width:-webkit-min-content;width:-moz-min-content;width:min-content;margin:1em auto !important;padding:0 .8rem;overflow:auto;touch-action:auto}.md-typeset del.critic,.md-typeset ins.critic,.md-typeset .critic.comment{-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:var(--md-typeset-del-color)}.md-typeset ins.critic{background-color:var(--md-typeset-ins-color)}.md-typeset .critic.comment{color:var(--md-code-hl-comment-color)}.md-typeset .critic.comment::before{content:"/* "}.md-typeset .critic.comment::after{content:" */"}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:.8rem;padding-left:.8rem;overflow:auto;box-shadow:none}.md-typeset .critic.block :first-child{margin-top:.5em}.md-typeset .critic.block :last-child{margin-bottom:.5em}:root{--md-details-icon: url("data:image/svg+xml;charset=utf-8,")}.md-typeset details{display:block;padding-top:0;overflow:visible}.md-typeset details[open]>summary::after{transform:rotate(90deg)}.md-typeset details:not([open]){padding-bottom:0}.md-typeset details:not([open])>summary{border-radius:.1rem}.md-typeset details::after{display:table;content:""}.md-typeset summary{display:block;min-height:1rem;padding:.4rem 1.8rem .4rem 2rem;border-top-left-radius:.1rem;border-top-right-radius:.1rem;cursor:pointer}.md-typeset summary:not(.focus-visible){outline:none;-webkit-tap-highlight-color:transparent}[dir=rtl] .md-typeset summary{padding:.4rem 2.2rem .4rem 1.8rem}.md-typeset summary::-webkit-details-marker{display:none}.md-typeset summary::after{position:absolute;top:.4rem;right:.4rem;width:1rem;height:1rem;background-color:currentColor;-webkit-mask-image:var(--md-details-icon);mask-image:var(--md-details-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transform:rotate(0deg);transition:transform 250ms;content:""}[dir=rtl] .md-typeset summary::after{right:initial;left:.4rem;transform:rotate(180deg)}.md-typeset img.emojione,.md-typeset img.twemoji,.md-typeset img.gemoji{width:1.125em;max-height:100%;vertical-align:-15%}.md-typeset span.twemoji{display:inline-block;height:1.125em;vertical-align:text-top}.md-typeset span.twemoji svg{width:1.125em;max-height:100%;fill:currentColor}.highlight [data-linenos]::before{position:-webkit-sticky;position:sticky;left:-1.1764705882em;float:left;margin-right:1.1764705882em;margin-left:-1.1764705882em;padding-left:1.1764705882em;color:var(--md-default-fg-color--light);background-color:var(--md-code-bg-color);box-shadow:-0.05rem 0 var(--md-default-fg-color--lighter) inset;content:attr(data-linenos);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.md-typeset .keys kbd::before,.md-typeset .keys 
kbd::after{position:relative;margin:0;color:inherit;-moz-osx-font-smoothing:initial;-webkit-font-smoothing:initial}.md-typeset .keys span{padding:0 .2em;color:var(--md-default-fg-color--light)}.md-typeset .keys .key-alt::before{padding-right:.4em;content:"⎇"}.md-typeset .keys .key-left-alt::before{padding-right:.4em;content:"⎇"}.md-typeset .keys .key-right-alt::before{padding-right:.4em;content:"⎇"}.md-typeset .keys .key-command::before{padding-right:.4em;content:"⌘"}.md-typeset .keys .key-left-command::before{padding-right:.4em;content:"⌘"}.md-typeset .keys .key-right-command::before{padding-right:.4em;content:"⌘"}.md-typeset .keys .key-control::before{padding-right:.4em;content:"⌃"}.md-typeset .keys .key-left-control::before{padding-right:.4em;content:"⌃"}.md-typeset .keys .key-right-control::before{padding-right:.4em;content:"⌃"}.md-typeset .keys .key-meta::before{padding-right:.4em;content:"◆"}.md-typeset .keys .key-left-meta::before{padding-right:.4em;content:"◆"}.md-typeset .keys .key-right-meta::before{padding-right:.4em;content:"◆"}.md-typeset .keys .key-option::before{padding-right:.4em;content:"⌥"}.md-typeset .keys .key-left-option::before{padding-right:.4em;content:"⌥"}.md-typeset .keys .key-right-option::before{padding-right:.4em;content:"⌥"}.md-typeset .keys .key-shift::before{padding-right:.4em;content:"⇧"}.md-typeset .keys .key-left-shift::before{padding-right:.4em;content:"⇧"}.md-typeset .keys .key-right-shift::before{padding-right:.4em;content:"⇧"}.md-typeset .keys .key-super::before{padding-right:.4em;content:"❖"}.md-typeset .keys .key-left-super::before{padding-right:.4em;content:"❖"}.md-typeset .keys .key-right-super::before{padding-right:.4em;content:"❖"}.md-typeset .keys .key-windows::before{padding-right:.4em;content:"⊞"}.md-typeset .keys .key-left-windows::before{padding-right:.4em;content:"⊞"}.md-typeset .keys .key-right-windows::before{padding-right:.4em;content:"⊞"}.md-typeset .keys .key-arrow-down::before{padding-right:.4em;content:"↓"}.md-typeset .keys .key-arrow-left::before{padding-right:.4em;content:"←"}.md-typeset .keys .key-arrow-right::before{padding-right:.4em;content:"→"}.md-typeset .keys .key-arrow-up::before{padding-right:.4em;content:"↑"}.md-typeset .keys .key-backspace::before{padding-right:.4em;content:"⌫"}.md-typeset .keys .key-backtab::before{padding-right:.4em;content:"⇤"}.md-typeset .keys .key-caps-lock::before{padding-right:.4em;content:"⇪"}.md-typeset .keys .key-clear::before{padding-right:.4em;content:"⌧"}.md-typeset .keys .key-context-menu::before{padding-right:.4em;content:"☰"}.md-typeset .keys .key-delete::before{padding-right:.4em;content:"⌦"}.md-typeset .keys .key-eject::before{padding-right:.4em;content:"⏏"}.md-typeset .keys .key-end::before{padding-right:.4em;content:"⤓"}.md-typeset .keys .key-escape::before{padding-right:.4em;content:"⎋"}.md-typeset .keys .key-home::before{padding-right:.4em;content:"⤒"}.md-typeset .keys .key-insert::before{padding-right:.4em;content:"⎀"}.md-typeset .keys .key-page-down::before{padding-right:.4em;content:"⇟"}.md-typeset .keys .key-page-up::before{padding-right:.4em;content:"⇞"}.md-typeset .keys .key-print-screen::before{padding-right:.4em;content:"⎙"}.md-typeset .keys .key-tab::after{padding-left:.4em;content:"⇥"}.md-typeset .keys .key-num-enter::after{padding-left:.4em;content:"⌤"}.md-typeset .keys .key-enter::after{padding-left:.4em;content:"⏎"}.md-typeset .tabbed-content{display:none;order:99;width:100%;box-shadow:0 -0.05rem var(--md-default-fg-color--lightest)}@media print{.md-typeset 
.tabbed-content{display:block;order:initial}}.md-typeset .tabbed-content>pre:only-child,.md-typeset .tabbed-content>.codehilite:only-child pre,.md-typeset .tabbed-content>.codehilitetable:only-child,.md-typeset .tabbed-content>.highlight:only-child pre,.md-typeset .tabbed-content>.highlighttable:only-child{margin:0}.md-typeset .tabbed-content>pre:only-child>code,.md-typeset .tabbed-content>.codehilite:only-child pre>code,.md-typeset .tabbed-content>.codehilitetable:only-child>code,.md-typeset .tabbed-content>.highlight:only-child pre>code,.md-typeset .tabbed-content>.highlighttable:only-child>code{border-top-left-radius:0;border-top-right-radius:0}.md-typeset .tabbed-content>.tabbed-set{margin:0}.md-typeset .tabbed-set{position:relative;display:flex;flex-wrap:wrap;margin:1em 0;border-radius:.1rem}.md-typeset .tabbed-set>input{position:absolute;width:0;height:0;opacity:0}.md-typeset .tabbed-set>input:checked+label{color:var(--md-accent-fg-color);border-color:var(--md-accent-fg-color)}.md-typeset .tabbed-set>input:checked+label+.tabbed-content{display:block}.md-typeset .tabbed-set>input:focus+label{outline-style:auto}.md-typeset .tabbed-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.md-typeset .tabbed-set>label{z-index:1;width:auto;padding:.9375em 1.25em .78125em;color:var(--md-default-fg-color--light);font-weight:700;font-size:.64rem;border-bottom:.1rem solid transparent;cursor:pointer;transition:color 250ms}html .md-typeset .tabbed-set>label:hover{color:var(--md-accent-fg-color)}:root{--md-tasklist-icon: url( "data:image/svg+xml;charset=utf-8," );--md-tasklist-icon--checked: url( "data:image/svg+xml;charset=utf-8," )}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}[dir=rtl] .md-typeset .task-list-item [type=checkbox]{right:-2em;left:initial}.md-typeset .task-list-control .task-list-indicator::before{position:absolute;top:.15em;left:-1.5em;width:1.25em;height:1.25em;background-color:var(--md-default-fg-color--lightest);-webkit-mask-image:var(--md-tasklist-icon);mask-image:var(--md-tasklist-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-typeset .task-list-control .task-list-indicator::before{right:-1.5em;left:initial}.md-typeset .task-list-control [type=checkbox]:checked+.task-list-indicator::before{background-color:#00e676;-webkit-mask-image:var(--md-tasklist-icon--checked);mask-image:var(--md-tasklist-icon--checked)}.md-typeset .task-list-control [type=checkbox]{z-index:-1;opacity:0}
diff --git a/0.2/assets/stylesheets/main.ec3b3678.min.css b/0.2/assets/stylesheets/main.ec3b3678.min.css
new file mode 100644
index 00000000..8d8bdb96
--- /dev/null
+++ b/0.2/assets/stylesheets/main.ec3b3678.min.css
@@ -0,0 +1 @@
+@charset
"UTF-8";html{box-sizing:border-box;-webkit-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none}*,:after,:before{box-sizing:inherit}body{margin:0}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}hr{display:block;box-sizing:initial;height:.05rem;padding:0;overflow:visible;border:0}small{font-size:80%}sub,sup{line-height:1em}img{border-style:none}table{border-collapse:initial;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{margin:0;padding:0;font-size:inherit;background:transparent;border:0}input{border:0;outline:none}:root{--md-default-fg-color:rgba(0,0,0,0.87);--md-default-fg-color--light:rgba(0,0,0,0.54);--md-default-fg-color--lighter:rgba(0,0,0,0.32);--md-default-fg-color--lightest:rgba(0,0,0,0.07);--md-default-bg-color:#fff;--md-default-bg-color--light:hsla(0,0%,100%,0.7);--md-default-bg-color--lighter:hsla(0,0%,100%,0.3);--md-default-bg-color--lightest:hsla(0,0%,100%,0.12);--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#303fa1;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7);--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:rgba(83,108,254,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}:root>*{--md-code-fg-color:#36464e;--md-code-bg-color:#f5f5f5;--md-code-hl-color:rgba(255,255,0,0.5);--md-code-hl-number-color:#d52a2a;--md-code-hl-special-color:#db1457;--md-code-hl-function-color:#a846b9;--md-code-hl-constant-color:#6e59d9;--md-code-hl-keyword-color:#3f6ec6;--md-code-hl-string-color:#1c7d4d;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-default-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color);--md-typeset-mark-color:rgba(255,255,0,0.5);--md-typeset-del-color:rgba(245,80,61,0.15);--md-typeset-ins-color:rgba(11,213,112,0.15);--md-typeset-kbd-color:#fafafa;--md-typeset-kbd-accent-color:#fff;--md-typeset-kbd-border-color:#b8b8b8;--md-admonition-fg-color:var(--md-default-fg-color);--md-admonition-bg-color:var(--md-default-bg-color);--md-footer-fg-color:#fff;--md-footer-fg-color--light:hsla(0,0%,100%,0.7);--md-footer-fg-color--lighter:hsla(0,0%,100%,0.3);--md-footer-bg-color:rgba(0,0,0,0.87);--md-footer-bg-color--dark:rgba(0,0,0,0.32)}.md-icon svg{display:block;width:1.2rem;height:1.2rem;fill:currentColor}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}aside,body,input{color:var(--md-typeset-color);font-feature-settings:"kern","liga";font-family:var(--md-text-font-family,_),-apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif}code,kbd,pre{font-feature-settings:"kern";font-family:var(--md-code-font-family,_),SFMono-Regular,Consolas,Menlo,monospace}:root{--md-typeset-table--ascending:url("data:image/svg+xml;charset=utf-8,");--md-typeset-table--descending:url("data:image/svg+xml;charset=utf-8,")}.md-typeset{font-size:.8rem;line-height:1.6;-webkit-print-color-adjust:exact;color-adjust:exact}@media print{.md-typeset{font-size:.68rem}}.md-typeset blockquote,.md-typeset dl,.md-typeset figure,.md-typeset ol,.md-typeset pre,.md-typeset ul{display:flow-root;margin:1em 0}.md-typeset h1{margin:0 0 
1.25em;color:var(--md-default-fg-color--light);font-size:2em;line-height:1.3}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{margin:1.6em 0 .64em;font-size:1.5625em;line-height:1.4}.md-typeset h3{margin:1.6em 0 .8em;font-weight:400;font-size:1.25em;line-height:1.5;letter-spacing:-.01em}.md-typeset h2+h3{margin-top:.8em}.md-typeset h4{margin:1em 0;font-weight:700;letter-spacing:-.01em}.md-typeset h5,.md-typeset h6{margin:1.25em 0;color:var(--md-default-fg-color--light);font-weight:700;font-size:.8em;letter-spacing:-.01em}.md-typeset h5{text-transform:uppercase}.md-typeset hr{display:flow-root;margin:1.5em 0;border-bottom:.05rem solid var(--md-default-fg-color--lightest)}.md-typeset a{color:var(--md-typeset-a-color);word-break:break-word}.md-typeset a,.md-typeset a:before{transition:color 125ms}.md-typeset a:focus,.md-typeset a:hover{color:var(--md-accent-fg-color)}.md-typeset code,.md-typeset kbd,.md-typeset pre{color:var(--md-code-fg-color);direction:ltr}@media print{.md-typeset code,.md-typeset kbd,.md-typeset pre{white-space:pre-wrap}}.md-typeset code{padding:0 .2941176471em;font-size:.85em;word-break:break-word;background-color:var(--md-code-bg-color);border-radius:.1rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset code:not(.focus-visible){outline:none;-webkit-tap-highlight-color:transparent}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:initial;padding:initial;background-color:initial;box-shadow:none}.md-typeset a code{color:currentColor}.md-typeset pre{position:relative;line-height:1.4}.md-typeset pre>code{display:block;margin:0;padding:.7720588235em 1.1764705882em;overflow:auto;word-break:normal;box-shadow:none;-webkit-box-decoration-break:slice;box-decoration-break:slice;touch-action:auto;scrollbar-width:thin;scrollbar-color:var(--md-default-fg-color--lighter) transparent}.md-typeset pre>code:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-typeset pre>code::-webkit-scrollbar{width:.2rem;height:.2rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}@media screen and (max-width:44.9375em){.md-content>.md-typeset>pre{margin:1em -.8rem}.md-content>.md-typeset>pre code{border-radius:0}}.md-typeset kbd{display:inline-block;padding:0 .6666666667em;color:var(--md-default-fg-color);font-size:.75em;vertical-align:text-top;word-break:break-word;background-color:var(--md-typeset-kbd-color);border-radius:.1rem;box-shadow:0 .1rem 0 .05rem var(--md-typeset-kbd-border-color),0 .1rem 0 var(--md-typeset-kbd-border-color),0 -.1rem .2rem var(--md-typeset-kbd-accent-color) inset}.md-typeset mark{color:inherit;word-break:break-word;background-color:var(--md-typeset-mark-color);-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{text-decoration:none;border-bottom:.05rem dotted var(--md-default-fg-color--light);cursor:help}@media (hover:none){.md-typeset abbr{position:relative}.md-typeset abbr[title]:focus:after,.md-typeset abbr[title]:hover:after{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:absolute;left:0;display:inline-block;width:auto;min-width:-webkit-max-content;min-width:-moz-max-content;min-width:max-content;max-width:80%;margin-top:2em;padding:.2rem 
.3rem;color:var(--md-default-bg-color);font-size:.7rem;background-color:var(--md-default-fg-color);border-radius:.1rem;content:attr(title)}}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.078125em}[dir=rtl] .md-typeset sub,[dir=rtl] .md-typeset sup{margin-right:.078125em;margin-left:0}.md-typeset blockquote{padding-left:.6rem;color:var(--md-default-fg-color--light);border-left:.2rem solid var(--md-default-fg-color--lighter)}[dir=rtl] .md-typeset blockquote{padding-right:.6rem;padding-left:0;border-right:.2rem solid var(--md-default-fg-color--lighter);border-left:initial}.md-typeset ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{margin-left:.625em;padding:0}[dir=rtl] .md-typeset ol,[dir=rtl] .md-typeset ul{margin-right:.625em;margin-left:0}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}[dir=rtl] .md-typeset ol li,[dir=rtl] .md-typeset ul li{margin-right:1.25em;margin-left:0}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}[dir=rtl] .md-typeset ol li ol,[dir=rtl] .md-typeset ol li ul,[dir=rtl] .md-typeset ul li ol,[dir=rtl] .md-typeset ul li ul{margin-right:.625em;margin-left:0}.md-typeset dd{margin:1em 0 1.5em 1.875em}[dir=rtl] .md-typeset dd{margin-right:1.875em;margin-left:0}.md-typeset img,.md-typeset svg{max-width:100%;height:auto}.md-typeset img[align=left],.md-typeset svg[align=left]{margin:1em 1em 1em 0}.md-typeset img[align=right],.md-typeset svg[align=right]{margin:1em 0 1em 1em}.md-typeset img[align]:only-child,.md-typeset svg[align]:only-child{margin-top:0}.md-typeset figure{width:-webkit-fit-content;width:-moz-fit-content;width:fit-content;max-width:100%;margin:0 auto;text-align:center}.md-typeset figure img{display:block}.md-typeset figcaption{max-width:24rem;margin:1em auto 2em;font-style:italic}.md-typeset iframe{max-width:100%}.md-typeset table:not([class]){display:inline-block;max-width:100%;overflow:auto;font-size:.64rem;background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .05rem rgba(0,0,0,.1);touch-action:auto}@media print{.md-typeset table:not([class]){display:table}}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td>:first-child,.md-typeset table:not([class]) th>:first-child{margin-top:0}.md-typeset table:not([class]) td>:last-child,.md-typeset table:not([class]) th>:last-child{margin-bottom:0}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}[dir=rtl] .md-typeset table:not([class]) td:not([align]),[dir=rtl] .md-typeset table:not([class]) th:not([align]){text-align:right}.md-typeset table:not([class]) th{min-width:5rem;padding:.9375em 1.25em;color:var(--md-default-bg-color);vertical-align:top;background-color:var(--md-default-fg-color--light)}.md-typeset table:not([class]) th a{color:inherit}.md-typeset table:not([class]) td{padding:.9375em 1.25em;vertical-align:top;border-top:.05rem solid var(--md-default-fg-color--lightest)}.md-typeset table:not([class]) tr{transition:background-color 125ms}.md-typeset table:not([class]) tr:hover{background-color:rgba(0,0,0,.035);box-shadow:0 .05rem 0 
var(--md-default-bg-color) inset}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset table th[role=columnheader]{cursor:pointer}.md-typeset table th[role=columnheader]:after{display:inline-block;width:1.2em;height:1.2em;margin-left:.5em;vertical-align:sub;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}.md-typeset table th[role=columnheader][aria-sort=ascending]:after{background-color:currentColor;-webkit-mask-image:var(--md-typeset-table--ascending);mask-image:var(--md-typeset-table--ascending)}.md-typeset table th[role=columnheader][aria-sort=descending]:after{background-color:currentColor;-webkit-mask-image:var(--md-typeset-table--descending);mask-image:var(--md-typeset-table--descending)}.md-typeset__scrollwrap{margin:1em -.8rem;overflow-x:auto;touch-action:auto}.md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 .8rem}@media print{.md-typeset__table{display:block}}html .md-typeset__table table{display:table;width:100%;margin:0;overflow:hidden}html{height:100%;overflow-x:hidden;font-size:125%}@media screen and (min-width:100em){html{font-size:137.5%}}@media screen and (min-width:125em){html{font-size:150%}}body{position:relative;display:flex;flex-direction:column;width:100%;min-height:100%;font-size:.5rem;background-color:var(--md-default-bg-color)}@media print{body{display:block}}@media screen and (max-width:59.9375em){body[data-md-state=lock]{position:fixed}}.md-grid{max-width:61rem;margin-right:auto;margin-left:auto}.md-container{display:flex;flex-direction:column;flex-grow:1}@media print{.md-container{display:block}}.md-main{flex-grow:1}.md-main__inner{display:flex;height:100%;margin-top:1.5rem}.md-ellipsis{overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.md-toggle{display:none}.md-option{position:absolute;width:0;height:0;opacity:0}.md-option:checked+label:not([hidden]){display:block}.md-option.focus-visible+label{outline-style:auto}.md-skip{position:fixed;z-index:-1;margin:.5rem;padding:.3rem .5rem;color:var(--md-default-bg-color);font-size:.64rem;background-color:var(--md-default-fg-color);border-radius:.1rem;transform:translateY(.4rem);opacity:0}.md-skip:focus{z-index:10;transform:translateY(0);opacity:1;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity 175ms 75ms}@page{margin:25mm}.md-banner{overflow:auto;color:var(--md-footer-fg-color);background-color:var(--md-footer-bg-color)}@media print{.md-banner{display:none}}.md-banner--warning{color:var(--md-default-fg-color);background:var(--md-typeset-mark-color)}.md-banner__inner{margin:.6rem auto;padding:0 .8rem;font-size:.7rem}:root{--md-clipboard-icon:url("data:image/svg+xml;charset=utf-8,")}.md-clipboard{position:absolute;top:.5em;right:.5em;z-index:1;width:1.5em;height:1.5em;color:var(--md-default-fg-color--lightest);border-radius:.1rem;cursor:pointer;transition:color .25s}@media print{.md-clipboard{display:none}}.md-clipboard:not(.focus-visible){outline:none;-webkit-tap-highlight-color:transparent}:hover>.md-clipboard{color:var(--md-default-fg-color--light)}.md-clipboard:focus,.md-clipboard:hover{color:var(--md-accent-fg-color)}.md-clipboard:after{display:block;width:1.125em;height:1.125em;margin:0 
auto;background-color:currentColor;-webkit-mask-image:var(--md-clipboard-icon);mask-image:var(--md-clipboard-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}.md-clipboard--inline{cursor:pointer}.md-clipboard--inline code{transition:color .25s,background-color .25s}.md-clipboard--inline:focus code,.md-clipboard--inline:hover code{color:var(--md-accent-fg-color);background-color:var(--md-accent-fg-color--transparent)}.md-content{flex-grow:1;min-width:0}.md-content__inner{margin:0 .8rem 1.2rem;padding-top:.6rem}@media screen and (min-width:76.25em){.md-sidebar--primary:not([hidden])~.md-content>.md-content__inner{margin-left:1.2rem}[dir=rtl] .md-sidebar--primary:not([hidden])~.md-content>.md-content__inner{margin-right:1.2rem;margin-left:.8rem}.md-sidebar--secondary:not([hidden])~.md-content>.md-content__inner{margin-right:1.2rem}[dir=rtl] .md-sidebar--secondary:not([hidden])~.md-content>.md-content__inner{margin-right:.8rem;margin-left:1.2rem}}.md-content__inner:before{display:block;height:.4rem;content:""}.md-content__inner>:last-child{margin-bottom:0}.md-content__button{float:right;margin:.4rem 0 .4rem .4rem;padding:0}@media print{.md-content__button{display:none}}[dir=rtl] .md-content__button{float:left;margin-right:.4rem;margin-left:0}[dir=rtl] .md-content__button svg{transform:scaleX(-1)}.md-typeset .md-content__button{color:var(--md-default-fg-color--lighter)}.md-content__button svg{display:inline;vertical-align:top}.md-dialog{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:fixed;right:.8rem;bottom:.8rem;left:auto;z-index:3;min-width:11.1rem;padding:.4rem .6rem;background-color:var(--md-default-fg-color);border-radius:.1rem;transform:translateY(100%);opacity:0;transition:transform 0ms .4s,opacity .4s;pointer-events:none}@media print{.md-dialog{display:none}}[dir=rtl] .md-dialog{right:auto;left:.8rem}.md-dialog[data-md-state=open]{transform:translateY(0);opacity:1;transition:transform .4s cubic-bezier(.075,.85,.175,1),opacity .4s;pointer-events:auto}.md-dialog__inner{color:var(--md-default-bg-color);font-size:.7rem}.md-typeset .md-button{display:inline-block;padding:.625em 2em;color:var(--md-primary-fg-color);font-weight:700;border:.1rem solid;border-radius:.1rem;transition:color 125ms,background-color 125ms,border-color 125ms}.md-typeset .md-button--primary{color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color);border-color:var(--md-primary-fg-color)}.md-typeset .md-button:focus,.md-typeset .md-button:hover{color:var(--md-accent-bg-color);background-color:var(--md-accent-fg-color);border-color:var(--md-accent-fg-color)}.md-typeset .md-input{height:1.8rem;padding:0 .6rem;font-size:.8rem;border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 .025rem .05rem rgba(0,0,0,.1);transition:box-shadow .25s}.md-typeset .md-input:focus,.md-typeset .md-input:hover{box-shadow:0 .4rem 1rem rgba(0,0,0,.15),0 .025rem .05rem rgba(0,0,0,.15)}.md-typeset .md-input--stretch{width:100%}.md-header{position:-webkit-sticky;position:sticky;top:0;right:0;left:0;z-index:3;color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color);box-shadow:0 0 .2rem transparent,0 .2rem .4rem transparent}@media print{.md-header{display:none}}.md-header[data-md-state=shadow]{box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2);transition:transform .25s cubic-bezier(.1,.7,.1,1),box-shadow 
.25s}.md-header[data-md-state=hidden]{transform:translateY(-100%);transition:transform .25s cubic-bezier(.8,0,.6,1),box-shadow .25s}.md-header .focus-visible{outline-color:currentColor}.md-header__inner{display:flex;align-items:center;padding:0 .2rem}.md-header__button{position:relative;z-index:1;margin:.2rem;padding:.4rem;color:currentColor;vertical-align:middle;cursor:pointer;transition:opacity .25s}.md-header__button:hover{opacity:.7}.md-header__button:not([hidden]){display:inline-block}.md-header__button:not(.focus-visible){outline:none;-webkit-tap-highlight-color:transparent}.md-header__button.md-logo{margin:.2rem;padding:.4rem}@media screen and (max-width:76.1875em){.md-header__button.md-logo{display:none}}.md-header__button.md-logo img,.md-header__button.md-logo svg{display:block;width:1.2rem;height:1.2rem;fill:currentColor}@media screen and (min-width:60em){.md-header__button[for=__search]{display:none}}.no-js .md-header__button[for=__search]{display:none}[dir=rtl] .md-header__button[for=__search] svg{transform:scaleX(-1)}@media screen and (min-width:76.25em){.md-header__button[for=__drawer]{display:none}}.md-header__topic{position:absolute;display:flex;max-width:100%;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s}.md-header__topic+.md-header__topic{z-index:-1;transform:translateX(1.25rem);opacity:0;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;pointer-events:none}[dir=rtl] .md-header__topic+.md-header__topic{transform:translateX(-1.25rem)}.md-header__title{flex-grow:1;height:2.4rem;margin-right:.4rem;margin-left:1rem;font-size:.9rem;line-height:2.4rem}.md-header__title[data-md-state=active] .md-header__topic{z-index:-1;transform:translateX(-1.25rem);opacity:0;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;pointer-events:none}[dir=rtl] .md-header__title[data-md-state=active] .md-header__topic{transform:translateX(1.25rem)}.md-header__title[data-md-state=active] .md-header__topic+.md-header__topic{z-index:0;transform:translateX(0);opacity:1;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;pointer-events:auto}.md-header__title>.md-header__ellipsis{position:relative;width:100%;height:100%}.md-header__option{display:flex;flex-shrink:0;max-width:100%;white-space:nowrap;transition:max-width 0ms .25s,opacity .25s .25s}[data-md-toggle=search]:checked~.md-header .md-header__option{max-width:0;opacity:0;transition:max-width 0ms,opacity 0ms}.md-header__source{display:none}@media screen and (min-width:60em){.md-header__source{display:block;width:11.7rem;max-width:11.7rem;margin-left:1rem}[dir=rtl] .md-header__source{margin-right:1rem;margin-left:0}}@media screen and (min-width:76.25em){.md-header__source{margin-left:1.4rem}[dir=rtl] .md-header__source{margin-right:1.4rem}}.md-footer{color:var(--md-footer-fg-color);background-color:var(--md-footer-bg-color)}@media print{.md-footer{display:none}}.md-footer__inner{padding:.2rem;overflow:auto}.md-footer__link{display:flex;padding-top:1.4rem;padding-bottom:.4rem;transition:opacity .25s}@media screen and (min-width:45em){.md-footer__link{width:50%}}.md-footer__link:focus,.md-footer__link:hover{opacity:.7}.md-footer__link--prev{float:left}@media screen and (max-width:44.9375em){.md-footer__link--prev{width:25%}.md-footer__link--prev .md-footer__title{display:none}}[dir=rtl] .md-footer__link--prev{float:right}[dir=rtl] .md-footer__link--prev svg{transform:scaleX(-1)}.md-footer__link--next{float:right;text-align:right}@media screen and 
(max-width:44.9375em){.md-footer__link--next{width:75%}}[dir=rtl] .md-footer__link--next{float:left;text-align:left}[dir=rtl] .md-footer__link--next svg{transform:scaleX(-1)}.md-footer__title{position:relative;flex-grow:1;max-width:calc(100% - 2.4rem);padding:0 1rem;font-size:.9rem;line-height:2.4rem}.md-footer__button{margin:.2rem;padding:.4rem}.md-footer__direction{position:absolute;right:0;left:0;margin-top:-1rem;padding:0 1rem;font-size:.64rem;opacity:.7}.md-footer-meta{background-color:var(--md-footer-bg-color--dark)}.md-footer-meta__inner{display:flex;flex-wrap:wrap;justify-content:space-between;padding:.2rem}html .md-footer-meta.md-typeset a{color:var(--md-footer-fg-color--light)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:var(--md-footer-fg-color)}.md-footer-copyright{width:100%;margin:auto .6rem;padding:.4rem 0;color:var(--md-footer-fg-color--lighter);font-size:.64rem}@media screen and (min-width:45em){.md-footer-copyright{width:auto}}.md-footer-copyright__highlight{color:var(--md-footer-fg-color--light)}.md-footer-social{margin:0 .4rem;padding:.2rem 0 .6rem}@media screen and (min-width:45em){.md-footer-social{padding:.6rem 0}}.md-footer-social__link{display:inline-block;width:1.6rem;height:1.6rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-footer-social__link svg{max-height:.8rem;vertical-align:-25%;fill:currentColor}:root{--md-nav-icon--prev:url("data:image/svg+xml;charset=utf-8,");--md-nav-icon--next:url("data:image/svg+xml;charset=utf-8,");--md-toc-icon:url("data:image/svg+xml;charset=utf-8,")}.md-nav{font-size:.7rem;line-height:1.3}.md-nav__title{display:block;padding:0 .6rem;overflow:hidden;font-weight:700;text-overflow:ellipsis}.md-nav__title .md-nav__button{display:none}.md-nav__title .md-nav__button img{width:auto;height:100%}.md-nav__title .md-nav__button.md-logo img,.md-nav__title .md-nav__button.md-logo svg{display:block;width:2.4rem;height:2.4rem;fill:currentColor}.md-nav__list{margin:0;padding:0}.md-nav__item{display:block;padding:0 .6rem}.md-nav__item .md-nav__item{padding-right:0}[dir=rtl] .md-nav__item .md-nav__item{padding-right:.6rem;padding-left:0}.md-nav__link{display:block;margin-top:.625em;overflow:hidden;text-overflow:ellipsis;cursor:pointer;transition:color 125ms;scroll-snap-align:start}.md-nav__link[data-md-state=blur]{color:var(--md-default-fg-color--light)}.md-nav__link--container{display:flex}.md-nav__link--container>:first-child{flex-grow:1}.md-nav__link--container>*{cursor:inherit}.md-nav__item .md-nav__link--active{color:var(--md-typeset-a-color)}.md-nav__link:focus,.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav--primary .md-nav__link[for=__toc]{display:none}.md-nav--primary .md-nav__link[for=__toc] .md-icon:after{display:block;width:100%;height:100%;-webkit-mask-image:var(--md-toc-icon);mask-image:var(--md-toc-icon);background-color:currentColor}.md-nav--primary .md-nav__link[for=__toc]~.md-nav,.md-nav__source{display:none}@media screen and (max-width:76.1875em){.md-nav--primary,.md-nav--primary .md-nav{position:absolute;top:0;right:0;left:0;z-index:1;display:flex;flex-direction:column;height:100%;background-color:var(--md-default-bg-color)}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:.8rem;line-height:1.5}.md-nav--primary .md-nav__title{position:relative;height:5.6rem;padding:3rem .8rem 
.2rem;color:var(--md-default-fg-color--light);font-weight:400;line-height:2.4rem;white-space:nowrap;background-color:var(--md-default-fg-color--lightest);cursor:pointer}.md-nav--primary .md-nav__title .md-nav__icon{position:absolute;top:.4rem;left:.4rem;display:block;width:1.2rem;height:1.2rem;margin:.2rem}[dir=rtl] .md-nav--primary .md-nav__title .md-nav__icon{right:.4rem;left:auto}.md-nav--primary .md-nav__title .md-nav__icon:after{display:block;width:100%;height:100%;background-color:currentColor;-webkit-mask-image:var(--md-nav-icon--prev);mask-image:var(--md-nav-icon--prev);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}.md-nav--primary .md-nav__title~.md-nav__list{overflow-y:auto;background-color:var(--md-default-bg-color);box-shadow:0 .05rem 0 var(--md-default-fg-color--lightest) inset;-ms-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory;touch-action:pan-y}.md-nav--primary .md-nav__title~.md-nav__list>:first-child{border-top:0}.md-nav--primary .md-nav__title[for=__drawer]{color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color)}.md-nav--primary .md-nav__title .md-logo{position:absolute;top:.2rem;left:.2rem;display:block;margin:.2rem;padding:.4rem}[dir=rtl] .md-nav--primary .md-nav__title .md-logo{right:.2rem;left:auto}.md-nav--primary .md-nav__list{flex:1}.md-nav--primary .md-nav__item{padding:0;border-top:.05rem solid var(--md-default-fg-color--lightest)}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:2.4rem}[dir=rtl] .md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:.8rem;padding-left:2.4rem}.md-nav--primary .md-nav__item--active>.md-nav__link{color:var(--md-typeset-a-color)}.md-nav--primary .md-nav__item--active>.md-nav__link:focus,.md-nav--primary .md-nav__item--active>.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav--primary .md-nav__link{position:relative;margin-top:0;padding:.6rem .8rem}.md-nav--primary .md-nav__link .md-nav__icon{position:absolute;top:50%;right:.6rem;width:1.2rem;height:1.2rem;margin-top:-.6rem;color:inherit;font-size:1.2rem}[dir=rtl] .md-nav--primary .md-nav__link .md-nav__icon{right:auto;left:.6rem}.md-nav--primary .md-nav__link .md-nav__icon:after{display:block;width:100%;height:100%;background-color:currentColor;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-nav--primary .md-nav__icon:after{transform:scale(-1)}.md-nav--primary .md-nav--secondary .md-nav__link{position:static}.md-nav--primary .md-nav--secondary .md-nav{position:static;background-color:initial}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:1.4rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-right:1.4rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-right:2rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:2.6rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-right:2.6rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:3.2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav 
.md-nav__link{padding-right:3.2rem;padding-left:0}.md-nav--secondary{background-color:initial}.md-nav__toggle~.md-nav{display:flex;transform:translateX(100%);opacity:0;transition:transform .25s cubic-bezier(.8,0,.6,1),opacity 125ms 50ms}[dir=rtl] .md-nav__toggle~.md-nav{transform:translateX(-100%)}.md-nav__toggle:checked~.md-nav{transform:translateX(0);opacity:1;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity 125ms 125ms}.md-nav__toggle:checked~.md-nav>.md-nav__list{-webkit-backface-visibility:hidden;backface-visibility:hidden}}@media screen and (max-width:59.9375em){.md-nav--primary .md-nav__link[for=__toc]{display:block;padding-right:2.4rem}[dir=rtl] .md-nav--primary .md-nav__link[for=__toc]{padding-right:.8rem;padding-left:2.4rem}.md-nav--primary .md-nav__link[for=__toc] .md-icon:after{content:""}.md-nav--primary .md-nav__link[for=__toc]+.md-nav__link{display:none}.md-nav--primary .md-nav__link[for=__toc]~.md-nav{display:flex}.md-nav__source{display:block;padding:0 .2rem;color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color--dark)}}@media screen and (min-width:60em) and (max-width:76.1875em){.md-nav--integrated .md-nav__link[for=__toc]{display:block;padding-right:2.4rem;scroll-snap-align:none}[dir=rtl] .md-nav--integrated .md-nav__link[for=__toc]{padding-right:.8rem;padding-left:2.4rem}.md-nav--integrated .md-nav__link[for=__toc] .md-icon:after{content:""}.md-nav--integrated .md-nav__link[for=__toc]+.md-nav__link{display:none}.md-nav--integrated .md-nav__link[for=__toc]~.md-nav{display:flex}}@media screen and (min-width:60em){.md-nav--secondary .md-nav__title[for=__toc]{scroll-snap-align:start}.md-nav--secondary .md-nav__title .md-nav__icon{display:none}}@media screen and (min-width:76.25em){.md-nav{transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav--primary .md-nav__title[for=__drawer]{scroll-snap-align:start}.md-nav--primary .md-nav__title .md-nav__icon,.md-nav__toggle~.md-nav{display:none}.md-nav__toggle:checked~.md-nav,.md-nav__toggle:indeterminate~.md-nav{display:block}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--section{display:block;margin:1.25em 0}.md-nav__item--section:last-child{margin-bottom:0}.md-nav__item--section>.md-nav__link{font-weight:700;pointer-events:none}.md-nav__item--section>.md-nav__link>*{pointer-events:auto}.md-nav__item--section>.md-nav__link .md-icon{display:none}.md-nav__item--section>.md-nav{display:block}.md-nav__item--section>.md-nav>.md-nav__list>.md-nav__item{padding:0}.md-nav__icon{float:right;width:.9rem;height:.9rem;transition:transform .25s}[dir=rtl] .md-nav__icon{float:left;transform:rotate(180deg)}.md-nav__icon:after{display:inline-block;width:100%;height:100%;vertical-align:-.1rem;background-color:currentColor;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link .md-nav__icon,.md-nav__item--nested .md-nav__toggle:indeterminate~.md-nav__link .md-nav__icon{transform:rotate(90deg)}.md-nav--lifted>.md-nav__list>.md-nav__item,.md-nav--lifted>.md-nav__list>.md-nav__item--nested,.md-nav--lifted>.md-nav__title{display:none}.md-nav--lifted>.md-nav__list>.md-nav__item--active{display:block;padding:0}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav__link{display:none}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav>.md-nav__title{display:block;padding:0 
.6rem;pointer-events:none;scroll-snap-align:start}.md-nav--lifted>.md-nav__list>.md-nav__item>.md-nav__item{padding-right:.6rem}.md-nav--lifted .md-nav[data-md-level="1"]{display:block}.md-nav--integrated .md-nav__link[for=__toc]~.md-nav{display:block;margin-bottom:1.25em;border-left:.05rem solid var(--md-primary-fg-color)}.md-nav--integrated .md-nav__link[for=__toc]~.md-nav>.md-nav__title{display:none}}:root{--md-search-result-icon:url("data:image/svg+xml;charset=utf-8,")}.md-search{position:relative}@media screen and (min-width:60em){.md-search{padding:.2rem 0}}.no-js .md-search{display:none}.md-search__overlay{z-index:1;opacity:0}@media screen and (max-width:59.9375em){.md-search__overlay{position:absolute;top:.2rem;left:-2.2rem;width:2rem;height:2rem;overflow:hidden;background-color:var(--md-default-bg-color);border-radius:1rem;transform-origin:center;transition:transform .3s .1s,opacity .2s .2s;pointer-events:none}[dir=rtl] .md-search__overlay{right:-2.2rem;left:auto}[data-md-toggle=search]:checked~.md-header .md-search__overlay{opacity:1;transition:transform .4s,opacity .1s}}@media screen and (min-width:60em){.md-search__overlay{position:fixed;top:0;left:0;width:0;height:0;background-color:rgba(0,0,0,.54);cursor:pointer;transition:width 0ms .25s,height 0ms .25s,opacity .25s}[dir=rtl] .md-search__overlay{right:0;left:auto}[data-md-toggle=search]:checked~.md-header .md-search__overlay{width:100%;height:200vh;opacity:1;transition:width 0ms,height 0ms,opacity .25s}}@media screen and (max-width:29.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(45)}}@media screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(60)}}@media screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(75)}}.md-search__inner{-webkit-backface-visibility:hidden;backface-visibility:hidden}@media screen and (max-width:59.9375em){.md-search__inner{position:fixed;top:0;left:100%;z-index:2;width:100%;height:100%;transform:translateX(5%);opacity:0;transition:right 0ms .3s,left 0ms .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s}[data-md-toggle=search]:checked~.md-header .md-search__inner{left:0;transform:translateX(0);opacity:1;transition:right 0ms 0ms,left 0ms 0ms,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s}[dir=rtl] [data-md-toggle=search]:checked~.md-header .md-search__inner{right:0;left:auto}html [dir=rtl] .md-search__inner{right:100%;left:auto;transform:translateX(-5%)}}@media screen and (min-width:60em){.md-search__inner{position:relative;float:right;width:11.7rem;padding:.1rem 0;transition:width .25s cubic-bezier(.1,.7,.1,1)}[dir=rtl] .md-search__inner{float:left}}@media screen and (min-width:60em) and (max-width:76.1875em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:23.4rem}}@media screen and (min-width:76.25em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:34.4rem}}.md-search__form{position:relative;z-index:2;height:2.4rem;background-color:var(--md-default-bg-color);box-shadow:0 0 .6rem transparent;transition:color .25s,background-color .25s}@media screen and (min-width:60em){.md-search__form{height:1.8rem;background-color:rgba(0,0,0,.26);border-radius:.1rem}.md-search__form:hover{background-color:hsla(0,0%,100%,.12)}}[data-md-toggle=search]:checked~.md-header 
.md-search__form{color:var(--md-default-fg-color);background-color:var(--md-default-bg-color);border-radius:.1rem .1rem 0 0;box-shadow:0 0 .6rem rgba(0,0,0,.07)}.md-search__input{position:relative;z-index:2;width:100%;height:100%;padding:0 2.2rem 0 3.6rem;font-size:.9rem;text-overflow:ellipsis;background:transparent}[dir=rtl] .md-search__input{padding:0 3.6rem 0 2.2rem}.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s;transition:color .25s}.md-search__input::-moz-placeholder{-moz-transition:color .25s;transition:color .25s}.md-search__input::-ms-input-placeholder{-ms-transition:color .25s;transition:color .25s}.md-search__input::placeholder{transition:color .25s}.md-search__input::-webkit-input-placeholder{color:var(--md-default-fg-color--light)}.md-search__input::-moz-placeholder{color:var(--md-default-fg-color--light)}.md-search__input::-ms-input-placeholder{color:var(--md-default-fg-color--light)}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:var(--md-default-fg-color--light)}.md-search__input::-ms-clear{display:none}@media screen and (max-width:59.9375em){.md-search__input{width:100%;height:2.4rem;font-size:.9rem}}@media screen and (min-width:60em){.md-search__input{padding-left:2.2rem;color:inherit;font-size:.8rem}[dir=rtl] .md-search__input{padding-right:2.2rem}.md-search__input::-webkit-input-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::-moz-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::-ms-input-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::placeholder{color:var(--md-primary-bg-color--light)}.md-search__input+.md-search__icon{color:var(--md-primary-bg-color)}[data-md-toggle=search]:checked~.md-header .md-search__input{text-overflow:clip}[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input::-moz-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input::-ms-input-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:var(--md-default-fg-color--light)}}.md-search__icon{display:inline-block;width:1.2rem;height:1.2rem;cursor:pointer;transition:color .25s,opacity .25s}.md-search__icon:hover{opacity:.7}.md-search__icon[for=__search]{position:absolute;top:.3rem;left:.5rem;z-index:2}[dir=rtl] .md-search__icon[for=__search]{right:.5rem;left:auto}[dir=rtl] .md-search__icon[for=__search] svg{transform:scaleX(-1)}@media screen and (max-width:59.9375em){.md-search__icon[for=__search]{top:.6rem;left:.8rem}[dir=rtl] .md-search__icon[for=__search]{right:.8rem;left:auto}.md-search__icon[for=__search] svg:first-child{display:none}}@media screen and (min-width:60em){.md-search__icon[for=__search]{pointer-events:none}.md-search__icon[for=__search] svg:last-child{display:none}}.md-search__options{position:absolute;top:.3rem;right:.5rem;z-index:2;pointer-events:none}[dir=rtl] .md-search__options{right:auto;left:.5rem}@media screen and (max-width:59.9375em){.md-search__options{top:.6rem;right:.8rem}[dir=rtl] .md-search__options{right:auto;left:.8rem}}.md-search__options>*{margin-left:.2rem;color:var(--md-default-fg-color--light);transform:scale(.75);opacity:0;transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity 
.15s}.md-search__options>:not(.focus-visible){outline:none;-webkit-tap-highlight-color:transparent}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__options>*{transform:scale(1);opacity:1;pointer-events:auto}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__options>:hover{opacity:.7}.md-search__suggest{position:absolute;top:0;display:flex;align-items:center;width:100%;height:100%;padding:0 2.2rem 0 3.6rem;color:var(--md-default-fg-color--lighter);font-size:.9rem;white-space:nowrap;opacity:0;transition:opacity 50ms}[dir=rtl] .md-search__suggest{padding:0 3.6rem 0 2.2rem}@media screen and (min-width:60em){.md-search__suggest{padding-left:2.2rem;font-size:.8rem}[dir=rtl] .md-search__suggest{padding-right:2.2rem}}[data-md-toggle=search]:checked~.md-header .md-search__suggest{opacity:1;transition:opacity .3s .1s}.md-search__output{position:absolute;z-index:1;width:100%;overflow:hidden;border-radius:0 0 .1rem .1rem}@media screen and (max-width:59.9375em){.md-search__output{top:2.4rem;bottom:0}}@media screen and (min-width:60em){.md-search__output{top:1.9rem;opacity:0;transition:opacity .4s}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}}.md-search__scrollwrap{height:100%;overflow-y:auto;background-color:var(--md-default-bg-color);-webkit-backface-visibility:hidden;backface-visibility:hidden;touch-action:pan-y}@media (-webkit-max-device-pixel-ratio:1), (max-resolution:1dppx){.md-search__scrollwrap{transform:translateZ(0)}}@media screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap{width:23.4rem}}@media screen and (min-width:76.25em){.md-search__scrollwrap{width:34.4rem}}@media screen and (min-width:60em){.md-search__scrollwrap{max-height:0;scrollbar-width:thin;scrollbar-color:var(--md-default-fg-color--lighter) transparent}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-search__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}}.md-search-result{color:var(--md-default-fg-color);word-break:break-word}.md-search-result__meta{padding:0 .8rem;color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.8rem;background-color:var(--md-default-fg-color--lightest);scroll-snap-align:start}@media screen and (min-width:60em){.md-search-result__meta{padding-left:2.2rem}[dir=rtl] .md-search-result__meta{padding-right:2.2rem;padding-left:0}}.md-search-result__list{margin:0;padding:0;list-style:none}.md-search-result__item{box-shadow:0 -.05rem 0 var(--md-default-fg-color--lightest)}.md-search-result__item:first-child{box-shadow:none}.md-search-result__link{display:block;outline:none;transition:background-color .25s;scroll-snap-align:start}.md-search-result__link:focus,.md-search-result__link:hover{background-color:var(--md-accent-fg-color--transparent)}.md-search-result__link:last-child p:last-child{margin-bottom:.6rem}.md-search-result__more summary{display:block;padding:.75em .8rem;color:var(--md-typeset-a-color);font-size:.64rem;outline:0;cursor:pointer;transition:color .25s,background-color .25s;scroll-snap-align:start}@media screen and (min-width:60em){.md-search-result__more 
summary{padding-left:2.2rem}[dir=rtl] .md-search-result__more summary{padding-right:2.2rem;padding-left:.8rem}}.md-search-result__more summary:focus,.md-search-result__more summary:hover{color:var(--md-accent-fg-color);background-color:var(--md-accent-fg-color--transparent)}.md-search-result__more summary::-webkit-details-marker,.md-search-result__more summary::marker{display:none}.md-search-result__more summary~*>*{opacity:.65}.md-search-result__article{position:relative;padding:0 .8rem;overflow:hidden}@media screen and (min-width:60em){.md-search-result__article{padding-left:2.2rem}[dir=rtl] .md-search-result__article{padding-right:2.2rem;padding-left:.8rem}}.md-search-result__article--document .md-search-result__title{margin:.55rem 0;font-weight:400;font-size:.8rem;line-height:1.4}.md-search-result__icon{position:absolute;left:0;width:1.2rem;height:1.2rem;margin:.5rem;color:var(--md-default-fg-color--light)}@media screen and (max-width:59.9375em){.md-search-result__icon{display:none}}.md-search-result__icon:after{display:inline-block;width:100%;height:100%;background-color:currentColor;-webkit-mask-image:var(--md-search-result-icon);mask-image:var(--md-search-result-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-search-result__icon{right:0;left:auto}[dir=rtl] .md-search-result__icon:after{transform:scaleX(-1)}.md-search-result__title{margin:.5em 0;font-weight:700;font-size:.64rem;line-height:1.6}.md-search-result__teaser{display:-webkit-box;max-height:2rem;margin:.5em 0;overflow:hidden;color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.6;text-overflow:ellipsis;-webkit-box-orient:vertical;-webkit-line-clamp:2}@media screen and (max-width:44.9375em){.md-search-result__teaser{max-height:3rem;-webkit-line-clamp:3}}@media screen and (min-width:60em) and (max-width:76.1875em){.md-search-result__teaser{max-height:3rem;-webkit-line-clamp:3}}.md-search-result__teaser mark{text-decoration:underline;background-color:initial}.md-search-result__terms{margin:.5em 0;font-size:.64rem;font-style:italic}.md-search-result mark{color:var(--md-accent-fg-color);background-color:initial}.md-select{position:relative;z-index:1}.md-select__inner{position:absolute;top:calc(100% - .2rem);left:50%;max-height:0;margin-top:.2rem;color:var(--md-default-fg-color);background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 0 .05rem rgba(0,0,0,.25);transform:translate3d(-50%,.3rem,0);opacity:0;transition:transform .25s 375ms,opacity .25s .25s,max-height 0ms .5s}.md-select:focus-within .md-select__inner,.md-select:hover .md-select__inner{max-height:10rem;transform:translate3d(-50%,0,0);opacity:1;transition:transform .25s cubic-bezier(.1,.7,.1,1),opacity .25s,max-height .25s}.md-select__inner:after{position:absolute;top:0;left:50%;width:0;height:0;margin-top:-.2rem;margin-left:-.2rem;border-left:.2rem solid transparent;border-right:.2rem solid transparent;border-top:0;border-bottom:.2rem solid transparent;border-bottom-color:var(--md-default-bg-color);content:""}.md-select__list{max-height:inherit;margin:0;padding:0;overflow:auto;font-size:.8rem;list-style-type:none;border-radius:.1rem}.md-select__item{line-height:1.8rem}.md-select__link{display:block;width:100%;padding-right:1.2rem;padding-left:.6rem;cursor:pointer;transition:background-color .25s,color .25s;scroll-snap-align:start}[dir=rtl] 
.md-select__link{padding-right:.6rem;padding-left:1.2rem}.md-select__link:focus,.md-select__link:hover{background-color:var(--md-default-fg-color--lightest)}.md-sidebar{position:-webkit-sticky;position:sticky;top:2.4rem;flex-shrink:0;align-self:flex-start;width:12.1rem;padding:1.2rem 0}@media print{.md-sidebar{display:none}}@media screen and (max-width:76.1875em){.md-sidebar--primary{position:fixed;top:0;left:-12.1rem;z-index:4;display:block;width:12.1rem;height:100%;background-color:var(--md-default-bg-color);transform:translateX(0);transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s}[dir=rtl] .md-sidebar--primary{right:-12.1rem;left:auto}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);transform:translateX(12.1rem)}[dir=rtl] [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{transform:translateX(-12.1rem)}.md-sidebar--primary .md-sidebar__scrollwrap{position:absolute;top:0;right:0;bottom:0;left:0;margin:0;-ms-scroll-snap-type:none;scroll-snap-type:none;overflow:hidden}}@media screen and (min-width:76.25em){.md-sidebar{height:0}.no-js .md-sidebar{height:auto}}.md-sidebar--secondary{display:none;order:2}@media screen and (min-width:60em){.md-sidebar--secondary{height:0}.no-js .md-sidebar--secondary{height:auto}.md-sidebar--secondary:not([hidden]){display:block}.md-sidebar--secondary .md-sidebar__scrollwrap{touch-action:pan-y}}.md-sidebar__scrollwrap{margin:0 .2rem;overflow-y:auto;-webkit-backface-visibility:hidden;backface-visibility:hidden;scrollbar-width:thin;scrollbar-color:var(--md-default-fg-color--lighter) transparent}.md-sidebar__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}@media screen and (max-width:76.1875em){.md-overlay{position:fixed;top:0;z-index:4;width:0;height:0;background-color:rgba(0,0,0,.54);opacity:0;transition:width 0ms .25s,height 0ms .25s,opacity .25s}[data-md-toggle=drawer]:checked~.md-overlay{width:100%;height:100%;opacity:1;transition:width 0ms,height 0ms,opacity .25s}}@-webkit-keyframes md-source__facts--done{0%{height:0}to{height:.65rem}}@keyframes md-source__facts--done{0%{height:0}to{height:.65rem}}@-webkit-keyframes md-source__fact--done{0%{transform:translateY(100%);opacity:0}50%{opacity:0}to{transform:translateY(0);opacity:1}}@keyframes md-source__fact--done{0%{transform:translateY(100%);opacity:0}50%{opacity:0}to{transform:translateY(0);opacity:1}}:root{--md-source-forks-icon:url("data:image/svg+xml;charset=utf-8,");--md-source-repositories-icon:url("data:image/svg+xml;charset=utf-8,");--md-source-stars-icon:url("data:image/svg+xml;charset=utf-8,");--md-source-version-icon:url("data:image/svg+xml;charset=utf-8,")}.md-source{display:block;font-size:.65rem;line-height:1.2;white-space:nowrap;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:opacity .25s}.md-source:hover{opacity:.7}.md-source__icon{display:inline-block;width:2rem;height:2.4rem;vertical-align:middle}.md-source__icon svg{margin-top:.6rem;margin-left:.6rem}[dir=rtl] .md-source__icon svg{margin-right:.6rem;margin-left:0}.md-source__icon+.md-source__repository{margin-left:-2rem;padding-left:2rem}[dir=rtl] 
.md-source__icon+.md-source__repository{margin-right:-2rem;margin-left:0;padding-right:2rem;padding-left:0}.md-source__repository{display:inline-block;max-width:calc(100% - 1.2rem);margin-left:.6rem;overflow:hidden;text-overflow:ellipsis;vertical-align:middle}.md-source__facts{margin:.1rem 0 0;padding:0;overflow:hidden;font-size:.55rem;list-style-type:none;opacity:.75}[data-md-state=done] .md-source__facts{-webkit-animation:md-source__facts--done .25s ease-in;animation:md-source__facts--done .25s ease-in}.md-source__fact{display:inline-block}[data-md-state=done] .md-source__fact{-webkit-animation:md-source__fact--done .4s ease-out;animation:md-source__fact--done .4s ease-out}.md-source__fact:before{display:inline-block;width:.6rem;height:.6rem;margin-right:.1rem;vertical-align:text-top;background-color:currentColor;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}.md-source__fact:nth-child(1n+2):before{margin-left:.4rem}[dir=rtl] .md-source__fact{margin-right:0;margin-left:.1rem}[dir=rtl] .md-source__fact:nth-child(1n+2):before{margin-right:.4rem;margin-left:0}.md-source__fact--version:before{-webkit-mask-image:var(--md-source-version-icon);mask-image:var(--md-source-version-icon)}.md-source__fact--stars:before{-webkit-mask-image:var(--md-source-stars-icon);mask-image:var(--md-source-stars-icon)}.md-source__fact--forks:before{-webkit-mask-image:var(--md-source-forks-icon);mask-image:var(--md-source-forks-icon)}.md-source__fact--repositories:before{-webkit-mask-image:var(--md-source-repositories-icon);mask-image:var(--md-source-repositories-icon)}.md-tabs{width:100%;overflow:auto;color:var(--md-primary-bg-color);background-color:var(--md-primary-fg-color)}@media print{.md-tabs{display:none}}@media screen and (max-width:76.1875em){.md-tabs{display:none}}.md-tabs[data-md-state=hidden]{pointer-events:none}.md-tabs__list{margin:0 0 0 .2rem;padding:0;white-space:nowrap;list-style:none;contain:content}[dir=rtl] .md-tabs__list{margin-right:.2rem;margin-left:0}.md-tabs__item{display:inline-block;height:2.4rem;padding-right:.6rem;padding-left:.6rem}.md-tabs__link{display:block;margin-top:.8rem;font-size:.7rem;-webkit-backface-visibility:hidden;backface-visibility:hidden;opacity:.7;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s}.md-tabs__link--active,.md-tabs__link:focus,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) .md-tabs__link{transition-delay:20ms}.md-tabs__item:nth-child(3) .md-tabs__link{transition-delay:40ms}.md-tabs__item:nth-child(4) .md-tabs__link{transition-delay:60ms}.md-tabs__item:nth-child(5) .md-tabs__link{transition-delay:80ms}.md-tabs__item:nth-child(6) .md-tabs__link{transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{transition-delay:.12s}.md-tabs__item:nth-child(8) .md-tabs__link{transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{transition-delay:.3s}.md-tabs[data-md-state=hidden] .md-tabs__link{transform:translateY(50%);opacity:0;transition:transform 0ms .1s,opacity .1s}@-webkit-keyframes 
md-annotation--pulse{0%{box-shadow:0 0 0 0 var(--md-default-fg-color--lightest)}75%{box-shadow:0 0 0 .625em transparent}to{box-shadow:0 0 0 0 transparent}}@keyframes md-annotation--pulse{0%{box-shadow:0 0 0 0 var(--md-default-fg-color--lightest)}75%{box-shadow:0 0 0 .625em transparent}to{box-shadow:0 0 0 0 transparent}}.md-tooltip{position:absolute;z-index:0;max-height:0;overflow:auto;color:var(--md-default-fg-color);background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 0 .05rem rgba(0,0,0,.25);transform:translateY(.4rem);-webkit-backface-visibility:hidden;backface-visibility:hidden;opacity:0;transition:transform .25s 375ms,opacity .25s,max-height 0ms .25s,z-index .25s}.md-tooltip__inner{padding:.8rem;font-size:.64rem}.md-tooltip__inner>:first-child{margin-top:0}.md-tooltip__inner>:last-child{margin-bottom:0}:focus-within>.md-tooltip,:focus>.md-tooltip{max-height:1000%;transform:translateY(0);opacity:1;transition:transform .25s cubic-bezier(.1,.7,.1,1),opacity .25s,max-height .25s 0ms,z-index 0ms}:focus-within>.md-tooltip--end,:focus>.md-tooltip--end{transform:translate(-100%)}:focus-within>.md-tooltip--center,:focus>.md-tooltip--center{transform:translate(-50%)}.focus-visible>.md-tooltip{outline:var(--md-accent-fg-color) auto}.md-tooltip--end{transform:translate(-100%,.4rem)}.md-tooltip--center{transform:translate(-50%,.4rem)}.md-annotation{white-space:normal;outline:none}.md-annotation:focus-within>*{z-index:2}.md-annotation:not([hidden]){display:inline-block}.md-annotation__index{position:relative;z-index:0;display:inline-block;min-width:1.4em;padding:0 .375em;color:var(--md-accent-bg-color);text-align:center;background-color:var(--md-default-fg-color--lighter);border-radius:1.25em;cursor:pointer;transition:background-color .25s,z-index .25s;-webkit-animation:md-annotation--pulse 2s infinite;animation:md-annotation--pulse 2s infinite;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}:focus-within>.md-annotation__index{transition:background-color .25s,z-index 0ms;-webkit-animation:none;animation:none}:focus-within>.md-annotation__index,:hover>.md-annotation__index{background-color:var(--md-accent-fg-color)}.md-annotation .md-tooltip{min-width:16rem;max-width:60%;margin:-1.1764705882em .7352941176em 0}.md-annotation .md-tooltip--center{margin-top:.7352941176em}.md-top{position:-webkit-sticky;position:sticky;bottom:.4rem;z-index:1;float:right;margin:-2.8rem .4rem .4rem;padding:.4rem;color:var(--md-primary-bg-color);background:var(--md-primary-fg-color);border-radius:100%;outline:none;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 .025rem .05rem rgba(0,0,0,.1);transform:translateY(0);transition:opacity 125ms,transform 125ms cubic-bezier(.4,0,.2,1),background-color 125ms}[dir=rtl] .md-top{float:left}.md-top[data-md-state=hidden]{transform:translateY(-.2rem);opacity:0}.md-top:focus,.md-top:hover{background:var(--md-accent-fg-color);transform:scale(1.1)}:root{--md-version-icon:url("data:image/svg+xml;charset=utf-8,")}.md-version{flex-shrink:0;height:2.4rem;font-size:.8rem}.md-version__current{position:relative;top:.05rem;margin-right:.4rem;margin-left:1.4rem}[dir=rtl] .md-version__current{margin-right:1.4rem;margin-left:.4rem}.md-version__current:after{display:inline-block;width:.4rem;height:.6rem;margin-left:.4rem;background-color:currentColor;-webkit-mask-image:var(--md-version-icon);mask-image:var(--md-version-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;content:""}[dir=rtl] 
.md-version__current:after{margin-right:.4rem;margin-left:0}.md-version__list{position:absolute;top:.15rem;z-index:1;max-height:1.8rem;margin:.2rem .8rem;padding:0;overflow:auto;color:var(--md-default-fg-color);list-style-type:none;background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 0 .05rem rgba(0,0,0,.25);opacity:0;transition:max-height 0ms .5s,opacity .25s .25s;-ms-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory}.md-version__list:focus-within,.md-version__list:hover{max-height:10rem;opacity:1;transition:max-height .25s,opacity .25s}.md-version__item{line-height:1.8rem}.md-version__link{display:block;width:100%;padding-right:1.2rem;padding-left:.6rem;white-space:nowrap;cursor:pointer;transition:color .25s,background-color .25s;scroll-snap-align:start}[dir=rtl] .md-version__link{padding-right:.6rem;padding-left:1.2rem}.md-version__link:focus,.md-version__link:hover{background-color:var(--md-default-fg-color--lightest)}:root{--md-admonition-icon--note:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--abstract:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--info:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--tip:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--success:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--question:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--warning:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--failure:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--danger:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--bug:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--example:url("data:image/svg+xml;charset=utf-8,");--md-admonition-icon--quote:url("data:image/svg+xml;charset=utf-8,")}.md-typeset .admonition,.md-typeset details{display:flow-root;margin:1.5625em 0;padding:0 .6rem;color:var(--md-admonition-fg-color);font-size:.64rem;page-break-inside:avoid;background-color:var(--md-admonition-bg-color);border-left:.2rem solid #448aff;border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 .025rem .05rem rgba(0,0,0,.05)}@media print{.md-typeset .admonition,.md-typeset details{box-shadow:none}}[dir=rtl] .md-typeset .admonition,[dir=rtl] .md-typeset details{border-right:.2rem solid #448aff;border-left:none}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin-top:1em;margin-bottom:1em}.md-typeset .admonition .md-typeset__scrollwrap,.md-typeset details .md-typeset__scrollwrap{margin:1em -.6rem}.md-typeset .admonition .md-typeset__table,.md-typeset details .md-typeset__table{padding:0 .6rem}.md-typeset .admonition>.tabbed-set:only-child,.md-typeset details>.tabbed-set:only-child{margin-top:0}html .md-typeset .admonition>:last-child,html .md-typeset details>:last-child{margin-bottom:.6rem}.md-typeset .admonition-title,.md-typeset summary{position:relative;margin:0 -.6rem 0 -.8rem;padding:.4rem .6rem .4rem 2rem;font-weight:700;background-color:rgba(68,138,255,.1);border-left:.2rem solid #448aff;border-top-left-radius:.1rem}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{margin:0 -.8rem 0 -.6rem;padding:.4rem 2rem .4rem .6rem;border-right:.2rem solid #448aff;border-left:none}html .md-typeset .admonition-title:last-child,html .md-typeset summary:last-child{margin-bottom:0}.md-typeset .admonition-title:before,.md-typeset 
summary:before{position:absolute;left:.6rem;width:1rem;height:1rem;background-color:#448aff;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-typeset .admonition-title:before,[dir=rtl] .md-typeset summary:before{right:.6rem;left:auto}.md-typeset .admonition-title+.tabbed-set:last-child,.md-typeset summary+.tabbed-set:last-child{margin-top:0}.md-typeset .admonition.note,.md-typeset details.note{border-color:#448aff}.md-typeset .note>.admonition-title,.md-typeset .note>summary{background-color:rgba(68,138,255,.1);border-color:#448aff}.md-typeset .note>.admonition-title:before,.md-typeset .note>summary:before{background-color:#448aff;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.abstract,.md-typeset .admonition.summary,.md-typeset .admonition.tldr,.md-typeset details.abstract,.md-typeset details.summary,.md-typeset details.tldr{border-color:#00b0ff}.md-typeset .abstract>.admonition-title,.md-typeset .abstract>summary,.md-typeset .summary>.admonition-title,.md-typeset .summary>summary,.md-typeset .tldr>.admonition-title,.md-typeset .tldr>summary{background-color:rgba(0,176,255,.1);border-color:#00b0ff}.md-typeset .abstract>.admonition-title:before,.md-typeset .abstract>summary:before,.md-typeset .summary>.admonition-title:before,.md-typeset .summary>summary:before,.md-typeset .tldr>.admonition-title:before,.md-typeset .tldr>summary:before{background-color:#00b0ff;-webkit-mask-image:var(--md-admonition-icon--abstract);mask-image:var(--md-admonition-icon--abstract);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.info,.md-typeset .admonition.todo,.md-typeset details.info,.md-typeset details.todo{border-color:#00b8d4}.md-typeset .info>.admonition-title,.md-typeset .info>summary,.md-typeset .todo>.admonition-title,.md-typeset .todo>summary{background-color:rgba(0,184,212,.1);border-color:#00b8d4}.md-typeset .info>.admonition-title:before,.md-typeset .info>summary:before,.md-typeset .todo>.admonition-title:before,.md-typeset .todo>summary:before{background-color:#00b8d4;-webkit-mask-image:var(--md-admonition-icon--info);mask-image:var(--md-admonition-icon--info);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.hint,.md-typeset .admonition.important,.md-typeset .admonition.tip,.md-typeset details.hint,.md-typeset details.important,.md-typeset details.tip{border-color:#00bfa5}.md-typeset .hint>.admonition-title,.md-typeset .hint>summary,.md-typeset .important>.admonition-title,.md-typeset .important>summary,.md-typeset .tip>.admonition-title,.md-typeset .tip>summary{background-color:rgba(0,191,165,.1);border-color:#00bfa5}.md-typeset .hint>.admonition-title:before,.md-typeset .hint>summary:before,.md-typeset .important>.admonition-title:before,.md-typeset .important>summary:before,.md-typeset .tip>.admonition-title:before,.md-typeset .tip>summary:before{background-color:#00bfa5;-webkit-mask-image:var(--md-admonition-icon--tip);mask-image:var(--md-admonition-icon--tip);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.check,.md-typeset .admonition.done,.md-typeset 
.admonition.success,.md-typeset details.check,.md-typeset details.done,.md-typeset details.success{border-color:#00c853}.md-typeset .check>.admonition-title,.md-typeset .check>summary,.md-typeset .done>.admonition-title,.md-typeset .done>summary,.md-typeset .success>.admonition-title,.md-typeset .success>summary{background-color:rgba(0,200,83,.1);border-color:#00c853}.md-typeset .check>.admonition-title:before,.md-typeset .check>summary:before,.md-typeset .done>.admonition-title:before,.md-typeset .done>summary:before,.md-typeset .success>.admonition-title:before,.md-typeset .success>summary:before{background-color:#00c853;-webkit-mask-image:var(--md-admonition-icon--success);mask-image:var(--md-admonition-icon--success);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.faq,.md-typeset .admonition.help,.md-typeset .admonition.question,.md-typeset details.faq,.md-typeset details.help,.md-typeset details.question{border-color:#64dd17}.md-typeset .faq>.admonition-title,.md-typeset .faq>summary,.md-typeset .help>.admonition-title,.md-typeset .help>summary,.md-typeset .question>.admonition-title,.md-typeset .question>summary{background-color:rgba(100,221,23,.1);border-color:#64dd17}.md-typeset .faq>.admonition-title:before,.md-typeset .faq>summary:before,.md-typeset .help>.admonition-title:before,.md-typeset .help>summary:before,.md-typeset .question>.admonition-title:before,.md-typeset .question>summary:before{background-color:#64dd17;-webkit-mask-image:var(--md-admonition-icon--question);mask-image:var(--md-admonition-icon--question);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.attention,.md-typeset .admonition.caution,.md-typeset .admonition.warning,.md-typeset details.attention,.md-typeset details.caution,.md-typeset details.warning{border-color:#ff9100}.md-typeset .attention>.admonition-title,.md-typeset .attention>summary,.md-typeset .caution>.admonition-title,.md-typeset .caution>summary,.md-typeset .warning>.admonition-title,.md-typeset .warning>summary{background-color:rgba(255,145,0,.1);border-color:#ff9100}.md-typeset .attention>.admonition-title:before,.md-typeset .attention>summary:before,.md-typeset .caution>.admonition-title:before,.md-typeset .caution>summary:before,.md-typeset .warning>.admonition-title:before,.md-typeset .warning>summary:before{background-color:#ff9100;-webkit-mask-image:var(--md-admonition-icon--warning);mask-image:var(--md-admonition-icon--warning);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.fail,.md-typeset .admonition.failure,.md-typeset .admonition.missing,.md-typeset details.fail,.md-typeset details.failure,.md-typeset details.missing{border-color:#ff5252}.md-typeset .fail>.admonition-title,.md-typeset .fail>summary,.md-typeset .failure>.admonition-title,.md-typeset .failure>summary,.md-typeset .missing>.admonition-title,.md-typeset .missing>summary{background-color:rgba(255,82,82,.1);border-color:#ff5252}.md-typeset .fail>.admonition-title:before,.md-typeset .fail>summary:before,.md-typeset .failure>.admonition-title:before,.md-typeset .failure>summary:before,.md-typeset .missing>.admonition-title:before,.md-typeset 
.missing>summary:before{background-color:#ff5252;-webkit-mask-image:var(--md-admonition-icon--failure);mask-image:var(--md-admonition-icon--failure);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.danger,.md-typeset .admonition.error,.md-typeset details.danger,.md-typeset details.error{border-color:#ff1744}.md-typeset .danger>.admonition-title,.md-typeset .danger>summary,.md-typeset .error>.admonition-title,.md-typeset .error>summary{background-color:rgba(255,23,68,.1);border-color:#ff1744}.md-typeset .danger>.admonition-title:before,.md-typeset .danger>summary:before,.md-typeset .error>.admonition-title:before,.md-typeset .error>summary:before{background-color:#ff1744;-webkit-mask-image:var(--md-admonition-icon--danger);mask-image:var(--md-admonition-icon--danger);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.bug,.md-typeset details.bug{border-color:#f50057}.md-typeset .bug>.admonition-title,.md-typeset .bug>summary{background-color:rgba(245,0,87,.1);border-color:#f50057}.md-typeset .bug>.admonition-title:before,.md-typeset .bug>summary:before{background-color:#f50057;-webkit-mask-image:var(--md-admonition-icon--bug);mask-image:var(--md-admonition-icon--bug);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.example,.md-typeset details.example{border-color:#7c4dff}.md-typeset .example>.admonition-title,.md-typeset .example>summary{background-color:rgba(124,77,255,.1);border-color:#7c4dff}.md-typeset .example>.admonition-title:before,.md-typeset .example>summary:before{background-color:#7c4dff;-webkit-mask-image:var(--md-admonition-icon--example);mask-image:var(--md-admonition-icon--example);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.cite,.md-typeset .admonition.quote,.md-typeset details.cite,.md-typeset details.quote{border-color:#9e9e9e}.md-typeset .cite>.admonition-title,.md-typeset .cite>summary,.md-typeset .quote>.admonition-title,.md-typeset .quote>summary{background-color:hsla(0,0%,62%,.1);border-color:#9e9e9e}.md-typeset .cite>.admonition-title:before,.md-typeset .cite>summary:before,.md-typeset .quote>.admonition-title:before,.md-typeset .quote>summary:before{background-color:#9e9e9e;-webkit-mask-image:var(--md-admonition-icon--quote);mask-image:var(--md-admonition-icon--quote);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}:root{--md-footnotes-icon:url("data:image/svg+xml;charset=utf-8,")}.md-typeset [id^="fnref:"]:target{scroll-margin-top:0;margin-top:-3.4rem;padding-top:3.4rem}.md-typeset [id^="fn:"]:target{scroll-margin-top:0;margin-top:-3.45rem;padding-top:3.45rem}.md-typeset .footnote{color:var(--md-default-fg-color--light);font-size:.64rem}.md-typeset .footnote>ol{margin-left:0}.md-typeset .footnote>ol>li{transition:color 125ms}.md-typeset .footnote>ol>li:target{color:var(--md-default-fg-color)}.md-typeset .footnote>ol>li:hover .footnote-backref,.md-typeset .footnote>ol>li:target .footnote-backref{transform:translateX(0);opacity:1}.md-typeset .footnote>ol>li>:first-child{margin-top:0}.md-typeset .footnote-backref{display:inline-block;color:var(--md-typeset-a-color);font-size:0;vertical-align:text-bottom;transform:translateX(.25rem);opacity:0;transition:color .25s,transform .25s .25s,opacity 125ms .25s}@media print{.md-typeset 
.footnote-backref{color:var(--md-typeset-a-color);transform:translateX(0);opacity:1}}[dir=rtl] .md-typeset .footnote-backref{transform:translateX(-.25rem)}.md-typeset .footnote-backref:hover{color:var(--md-accent-fg-color)}.md-typeset .footnote-backref:before{display:inline-block;width:.8rem;height:.8rem;background-color:currentColor;-webkit-mask-image:var(--md-footnotes-icon);mask-image:var(--md-footnotes-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-typeset .footnote-backref:before svg{transform:scaleX(-1)}.md-typeset .headerlink{display:inline-block;margin-left:.5rem;color:var(--md-default-fg-color--lighter);opacity:0;transition:color .25s,opacity 125ms}@media print{.md-typeset .headerlink{display:none}}[dir=rtl] .md-typeset .headerlink{margin-right:.5rem;margin-left:0}.md-typeset .headerlink:focus,.md-typeset :hover>.headerlink,.md-typeset :target>.headerlink{opacity:1;transition:color .25s,opacity 125ms}.md-typeset .headerlink:focus,.md-typeset .headerlink:hover,.md-typeset :target>.headerlink{color:var(--md-accent-fg-color)}.md-typeset :target{scroll-margin-top:3.6rem}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset :target{scroll-margin-top:6rem}}.md-typeset h1:target,.md-typeset h2:target,.md-typeset h3:target{scroll-margin-top:0}.md-typeset h1:target:before,.md-typeset h2:target:before,.md-typeset h3:target:before{display:block;margin-top:-3.4rem;padding-top:3.4rem;content:""}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset h1:target,.md-header--lifted~.md-container .md-typeset h2:target,.md-header--lifted~.md-container .md-typeset h3:target{scroll-margin-top:0}.md-header--lifted~.md-container .md-typeset h1:target:before,.md-header--lifted~.md-container .md-typeset h2:target:before,.md-header--lifted~.md-container .md-typeset h3:target:before{margin-top:-5.8rem;padding-top:5.8rem}}.md-typeset h4:target{scroll-margin-top:0}.md-typeset h4:target:before{display:block;margin-top:-3.45rem;padding-top:3.45rem;content:""}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset h4:target{scroll-margin-top:0}.md-header--lifted~.md-container .md-typeset h4:target:before{margin-top:-5.85rem;padding-top:5.85rem}}.md-typeset h5:target,.md-typeset h6:target{scroll-margin-top:0}.md-typeset h5:target:before,.md-typeset h6:target:before{display:block;margin-top:-3.6rem;padding-top:3.6rem;content:""}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset h5:target,.md-header--lifted~.md-container .md-typeset h6:target{scroll-margin-top:0}.md-header--lifted~.md-container .md-typeset h5:target:before,.md-header--lifted~.md-container .md-typeset h6:target:before{margin-top:-6rem;padding-top:6rem}}.md-typeset div.arithmatex{overflow:auto}@media screen and (max-width:44.9375em){.md-typeset div.arithmatex{margin:0 -.8rem}}.md-typeset div.arithmatex>*{width:-webkit-min-content;width:-moz-min-content;width:min-content;margin:1em auto!important;padding:0 .8rem;touch-action:auto}.md-typeset .critic.comment,.md-typeset del.critic,.md-typeset ins.critic{-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:var(--md-typeset-del-color)}.md-typeset ins.critic{background-color:var(--md-typeset-ins-color)}.md-typeset .critic.comment{color:var(--md-code-hl-comment-color)}.md-typeset .critic.comment:before{content:"/* "}.md-typeset .critic.comment:after{content:" 
*/"}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:.8rem;padding-left:.8rem;overflow:auto;box-shadow:none}.md-typeset .critic.block>:first-child{margin-top:.5em}.md-typeset .critic.block>:last-child{margin-bottom:.5em}:root{--md-details-icon:url("data:image/svg+xml;charset=utf-8,")}.md-typeset details{display:flow-root;padding-top:0;overflow:visible}.md-typeset details[open]>summary:after{transform:rotate(90deg)}.md-typeset details:not([open]){padding-bottom:0;box-shadow:none}.md-typeset details:not([open])>summary{border-radius:.1rem}.md-typeset summary{display:block;min-height:1rem;padding:.4rem 1.8rem .4rem 2rem;border-top-left-radius:.1rem;border-top-right-radius:.1rem;cursor:pointer}[dir=rtl] .md-typeset summary{padding:.4rem 2.2rem .4rem 1.8rem}.md-typeset summary:not(.focus-visible){outline:none;-webkit-tap-highlight-color:transparent}.md-typeset summary:after{position:absolute;top:.4rem;right:.4rem;width:1rem;height:1rem;background-color:currentColor;-webkit-mask-image:var(--md-details-icon);mask-image:var(--md-details-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transform:rotate(0deg);transition:transform .25s;content:""}[dir=rtl] .md-typeset summary:after{right:auto;left:.4rem;transform:rotate(180deg)}.md-typeset summary::-webkit-details-marker,.md-typeset summary::marker{display:none}.md-typeset .emojione,.md-typeset .gemoji,.md-typeset .twemoji{display:inline-flex;height:1.125em;vertical-align:text-top}.md-typeset .emojione svg,.md-typeset .gemoji svg,.md-typeset .twemoji svg{width:1.125em;max-height:100%;fill:currentColor}.highlight .o,.highlight .ow{color:var(--md-code-hl-operator-color)}.highlight .p{color:var(--md-code-hl-punctuation-color)}.highlight .cpf,.highlight .l,.highlight .s,.highlight .s1,.highlight .s2,.highlight .sb,.highlight .sc,.highlight .si,.highlight .ss{color:var(--md-code-hl-string-color)}.highlight .cp,.highlight .se,.highlight .sh,.highlight .sr,.highlight .sx{color:var(--md-code-hl-special-color)}.highlight .il,.highlight .m,.highlight .mb,.highlight .mf,.highlight .mh,.highlight .mi,.highlight .mo{color:var(--md-code-hl-number-color)}.highlight .k,.highlight .kd,.highlight .kn,.highlight .kp,.highlight .kr,.highlight .kt{color:var(--md-code-hl-keyword-color)}.highlight .kc,.highlight .n{color:var(--md-code-hl-name-color)}.highlight .bp,.highlight .nb,.highlight .no{color:var(--md-code-hl-constant-color)}.highlight .nc,.highlight .ne,.highlight .nf,.highlight .nn{color:var(--md-code-hl-function-color)}.highlight .nd,.highlight .ni,.highlight .nl,.highlight .nt{color:var(--md-code-hl-keyword-color)}.highlight .c,.highlight .c1,.highlight .ch,.highlight .cm,.highlight .cs,.highlight .sd{color:var(--md-code-hl-comment-color)}.highlight .na,.highlight .nv,.highlight .vc,.highlight .vg,.highlight .vi{color:var(--md-code-hl-variable-color)}.highlight .ge,.highlight .gh,.highlight .go,.highlight .gp,.highlight .gr,.highlight .gs,.highlight .gt,.highlight .gu{color:var(--md-code-hl-generic-color)}.highlight .gd,.highlight .gi{margin:0 -.125em;padding:0 .125em;border-radius:.1rem}.highlight .gd{background-color:var(--md-typeset-del-color)}.highlight .gi{background-color:var(--md-typeset-ins-color)}.highlight .hll{display:block;margin:0 -1.1764705882em;padding:0 1.1764705882em;background-color:var(--md-code-hl-color)}.highlight 
[data-linenos]:before{position:-webkit-sticky;position:sticky;left:-1.1764705882em;z-index:3;float:left;margin-right:1.1764705882em;margin-left:-1.1764705882em;padding-left:1.1764705882em;color:var(--md-default-fg-color--light);background-color:var(--md-code-bg-color);box-shadow:-.05rem 0 var(--md-default-fg-color--lightest) inset;content:attr(data-linenos);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.highlighttable{display:flow-root}.highlighttable tbody,.highlighttable td{display:block;padding:0}.highlighttable tr{display:flex}.highlighttable pre{margin:0}.highlighttable .linenos{padding:.7720588235em 0 .7720588235em 1.1764705882em;font-size:.85em;background-color:var(--md-code-bg-color);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.highlighttable .linenodiv{padding-right:.5882352941em;box-shadow:-.05rem 0 var(--md-default-fg-color--lightest) inset}.highlighttable .linenodiv pre{color:var(--md-default-fg-color--light);text-align:right}.highlighttable .code{flex:1;min-width:0}.md-typeset .highlighttable{margin:1em 0;direction:ltr;border-radius:.1rem}.md-typeset .highlighttable code{border-radius:0}@media screen and (max-width:44.9375em){.md-typeset.md-content__inner>.highlight{margin:1em -.8rem}.md-typeset.md-content__inner>.highlight .hll{margin:0 -.8rem;padding:0 .8rem}.md-typeset.md-content__inner>.highlight code{border-radius:0}.md-typeset>.highlighttable{margin:1em -.8rem;border-radius:0}.md-typeset>.highlighttable .hll{margin:0 -.8rem;padding:0 .8rem}}.md-typeset .keys kbd:after,.md-typeset .keys kbd:before{position:relative;margin:0;color:inherit;-moz-osx-font-smoothing:initial;-webkit-font-smoothing:initial}.md-typeset .keys span{padding:0 .2em;color:var(--md-default-fg-color--light)}.md-typeset .keys .key-alt:before,.md-typeset .keys .key-left-alt:before,.md-typeset .keys .key-right-alt:before{padding-right:.4em;content:"⎇"}.md-typeset .keys .key-command:before,.md-typeset .keys .key-left-command:before,.md-typeset .keys .key-right-command:before{padding-right:.4em;content:"⌘"}.md-typeset .keys .key-control:before,.md-typeset .keys .key-left-control:before,.md-typeset .keys .key-right-control:before{padding-right:.4em;content:"⌃"}.md-typeset .keys .key-left-meta:before,.md-typeset .keys .key-meta:before,.md-typeset .keys .key-right-meta:before{padding-right:.4em;content:"◆"}.md-typeset .keys .key-left-option:before,.md-typeset .keys .key-option:before,.md-typeset .keys .key-right-option:before{padding-right:.4em;content:"⌥"}.md-typeset .keys .key-left-shift:before,.md-typeset .keys .key-right-shift:before,.md-typeset .keys .key-shift:before{padding-right:.4em;content:"⇧"}.md-typeset .keys .key-left-super:before,.md-typeset .keys .key-right-super:before,.md-typeset .keys .key-super:before{padding-right:.4em;content:"❖"}.md-typeset .keys .key-left-windows:before,.md-typeset .keys .key-right-windows:before,.md-typeset .keys .key-windows:before{padding-right:.4em;content:"⊞"}.md-typeset .keys .key-arrow-down:before{padding-right:.4em;content:"↓"}.md-typeset .keys .key-arrow-left:before{padding-right:.4em;content:"←"}.md-typeset .keys .key-arrow-right:before{padding-right:.4em;content:"→"}.md-typeset .keys .key-arrow-up:before{padding-right:.4em;content:"↑"}.md-typeset .keys .key-backspace:before{padding-right:.4em;content:"⌫"}.md-typeset .keys .key-backtab:before{padding-right:.4em;content:"⇤"}.md-typeset .keys .key-caps-lock:before{padding-right:.4em;content:"⇪"}.md-typeset .keys 
.key-clear:before{padding-right:.4em;content:"⌧"}.md-typeset .keys .key-context-menu:before{padding-right:.4em;content:"☰"}.md-typeset .keys .key-delete:before{padding-right:.4em;content:"⌦"}.md-typeset .keys .key-eject:before{padding-right:.4em;content:"⏏"}.md-typeset .keys .key-end:before{padding-right:.4em;content:"⤓"}.md-typeset .keys .key-escape:before{padding-right:.4em;content:"⎋"}.md-typeset .keys .key-home:before{padding-right:.4em;content:"⤒"}.md-typeset .keys .key-insert:before{padding-right:.4em;content:"⎀"}.md-typeset .keys .key-page-down:before{padding-right:.4em;content:"⇟"}.md-typeset .keys .key-page-up:before{padding-right:.4em;content:"⇞"}.md-typeset .keys .key-print-screen:before{padding-right:.4em;content:"⎙"}.md-typeset .keys .key-tab:after{padding-left:.4em;content:"⇥"}.md-typeset .keys .key-num-enter:after{padding-left:.4em;content:"⌤"}.md-typeset .keys .key-enter:after{padding-left:.4em;content:"⏎"}.md-typeset .tabbed-content{display:none;order:99;width:100%;box-shadow:0 -.05rem var(--md-default-fg-color--lightest)}@media print{.md-typeset .tabbed-content{display:block;order:0}}.md-typeset .tabbed-content>.highlight:only-child pre,.md-typeset .tabbed-content>.highlighttable:only-child,.md-typeset .tabbed-content>pre:only-child{margin:0}.md-typeset .tabbed-content>.highlight:only-child pre>code,.md-typeset .tabbed-content>.highlighttable:only-child>code,.md-typeset .tabbed-content>pre:only-child>code{border-top-left-radius:0;border-top-right-radius:0}.md-typeset .tabbed-content>.tabbed-set{margin:0}.md-typeset .tabbed-set{position:relative;display:flex;flex-wrap:wrap;margin:1em 0;border-radius:.1rem}.md-typeset .tabbed-set>input{position:absolute;width:0;height:0;opacity:0}.md-typeset .tabbed-set>input:checked+label{color:var(--md-accent-fg-color);border-color:var(--md-accent-fg-color)}.md-typeset .tabbed-set>input:checked+label+.tabbed-content{display:block}.md-typeset .tabbed-set>input:focus+label{outline-style:auto}.md-typeset .tabbed-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.md-typeset .tabbed-set>label{z-index:1;width:auto;padding:.9375em 1.25em .78125em;color:var(--md-default-fg-color--light);font-weight:700;font-size:.64rem;border-bottom:.1rem solid transparent;cursor:pointer;transition:color .25s}.md-typeset .tabbed-set>label:hover{color:var(--md-accent-fg-color)}:root{--md-tasklist-icon:url("data:image/svg+xml;charset=utf-8,");--md-tasklist-icon--checked:url("data:image/svg+xml;charset=utf-8,")}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}[dir=rtl] .md-typeset .task-list-item [type=checkbox]{right:-2em;left:auto}.md-typeset .task-list-control [type=checkbox]{z-index:-1;opacity:0}.md-typeset .task-list-indicator:before{position:absolute;top:.15em;left:-1.5em;width:1.25em;height:1.25em;background-color:var(--md-default-fg-color--lightest);-webkit-mask-image:var(--md-tasklist-icon);mask-image:var(--md-tasklist-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;content:""}[dir=rtl] .md-typeset .task-list-indicator:before{right:-1.5em;left:auto}.md-typeset [type=checkbox]:checked+.task-list-indicator:before{background-color:#00e676;-webkit-mask-image:var(--md-tasklist-icon--checked);mask-image:var(--md-tasklist-icon--checked)}:root>*{--md-mermaid-font-family:var(--md-text-font-family) 
sans-serif;--md-mermaid-edge-color:var(--md-default-fg-color);--md-mermaid-node-bg-color:var(--md-accent-fg-color--transparent);--md-mermaid-node-fg-color:var(--md-accent-fg-color);--md-mermaid-label-bg-color:var(--md-default-bg-color);--md-mermaid-label-fg-color:var(--md-default-fg-color)}@media screen and (min-width:45em){.md-typeset .inline{float:left;width:11.7rem;margin-top:0;margin-right:.8rem;margin-bottom:.8rem}.md-typeset .inline.end,[dir=rtl] .md-typeset .inline{float:right;margin-right:0;margin-left:.8rem}[dir=rtl] .md-typeset .inline.end{float:left;margin-right:.8rem;margin-left:0}} \ No newline at end of file diff --git a/0.2/assets/stylesheets/palette.c308bc62.min.css b/0.2/assets/stylesheets/palette.c308bc62.min.css deleted file mode 100644 index 16b060a0..00000000 --- a/0.2/assets/stylesheets/palette.c308bc62.min.css +++ /dev/null @@ -1 +0,0 @@ -[data-md-color-accent=red]{--md-accent-fg-color: hsla(348, 100%, 55%, 1);--md-accent-fg-color--transparent: hsla(348, 100%, 55%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=pink]{--md-accent-fg-color: hsla(339, 100%, 48%, 1);--md-accent-fg-color--transparent: hsla(339, 100%, 48%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=purple]{--md-accent-fg-color: hsla(291, 96%, 62%, 1);--md-accent-fg-color--transparent: hsla(291, 96%, 62%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=deep-purple]{--md-accent-fg-color: hsla(256, 100%, 65%, 1);--md-accent-fg-color--transparent: hsla(256, 100%, 65%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=indigo]{--md-accent-fg-color: hsla(231, 99%, 66%, 1);--md-accent-fg-color--transparent: hsla(231, 99%, 66%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=blue]{--md-accent-fg-color: hsla(218, 100%, 63%, 1);--md-accent-fg-color--transparent: hsla(218, 100%, 63%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=light-blue]{--md-accent-fg-color: hsla(203, 100%, 46%, 1);--md-accent-fg-color--transparent: hsla(203, 100%, 46%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=cyan]{--md-accent-fg-color: hsla(188, 100%, 42%, 1);--md-accent-fg-color--transparent: hsla(188, 100%, 42%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=teal]{--md-accent-fg-color: hsla(172, 100%, 37%, 1);--md-accent-fg-color--transparent: hsla(172, 100%, 37%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=green]{--md-accent-fg-color: hsla(145, 100%, 39%, 1);--md-accent-fg-color--transparent: hsla(145, 100%, 39%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=light-green]{--md-accent-fg-color: hsla(97, 81%, 48%, 1);--md-accent-fg-color--transparent: hsla(97, 81%, 48%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-accent=lime]{--md-accent-fg-color: hsla(75, 100%, 46%, 1);--md-accent-fg-color--transparent: hsla(75, 100%, 46%, 
0.1);--md-accent-bg-color: hsla(0, 0%, 0%, 0.87);--md-accent-bg-color--light: hsla(0, 0%, 0%, 0.54)}[data-md-color-accent=yellow]{--md-accent-fg-color: hsla(50, 100%, 50%, 1);--md-accent-fg-color--transparent: hsla(50, 100%, 50%, 0.1);--md-accent-bg-color: hsla(0, 0%, 0%, 0.87);--md-accent-bg-color--light: hsla(0, 0%, 0%, 0.54)}[data-md-color-accent=amber]{--md-accent-fg-color: hsla(40, 100%, 50%, 1);--md-accent-fg-color--transparent: hsla(40, 100%, 50%, 0.1);--md-accent-bg-color: hsla(0, 0%, 0%, 0.87);--md-accent-bg-color--light: hsla(0, 0%, 0%, 0.54)}[data-md-color-accent=orange]{--md-accent-fg-color: hsla(34, 100%, 50%, 1);--md-accent-fg-color--transparent: hsla(34, 100%, 50%, 0.1);--md-accent-bg-color: hsla(0, 0%, 0%, 0.87);--md-accent-bg-color--light: hsla(0, 0%, 0%, 0.54)}[data-md-color-accent=deep-orange]{--md-accent-fg-color: hsla(14, 100%, 63%, 1);--md-accent-fg-color--transparent: hsla(14, 100%, 63%, 0.1);--md-accent-bg-color: hsla(0, 0%, 100%, 1);--md-accent-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=red]{--md-primary-fg-color: hsla(1, 83%, 63%, 1);--md-primary-fg-color--light: hsla(0, 69%, 67%, 1);--md-primary-fg-color--dark: hsla(1, 77%, 55%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=pink]{--md-primary-fg-color: hsla(340, 82%, 52%, 1);--md-primary-fg-color--light: hsla(340, 82%, 59%, 1);--md-primary-fg-color--dark: hsla(336, 78%, 43%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=purple]{--md-primary-fg-color: hsla(291, 47%, 51%, 1);--md-primary-fg-color--light: hsla(291, 47%, 60%, 1);--md-primary-fg-color--dark: hsla(287, 65%, 40%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=deep-purple]{--md-primary-fg-color: hsla(262, 47%, 55%, 1);--md-primary-fg-color--light: hsla(262, 47%, 63%, 1);--md-primary-fg-color--dark: hsla(262, 52%, 47%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=indigo]{--md-primary-fg-color: hsla(231, 48%, 48%, 1);--md-primary-fg-color--light: hsla(231, 44%, 56%, 1);--md-primary-fg-color--dark: hsla(232, 54%, 41%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=blue]{--md-primary-fg-color: hsla(207, 90%, 54%, 1);--md-primary-fg-color--light: hsla(207, 90%, 61%, 1);--md-primary-fg-color--dark: hsla(210, 79%, 46%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=light-blue]{--md-primary-fg-color: hsla(199, 98%, 48%, 1);--md-primary-fg-color--light: hsla(199, 92%, 56%, 1);--md-primary-fg-color--dark: hsla(201, 98%, 41%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=cyan]{--md-primary-fg-color: hsla(187, 100%, 42%, 1);--md-primary-fg-color--light: hsla(187, 71%, 50%, 1);--md-primary-fg-color--dark: hsla(186, 100%, 33%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=teal]{--md-primary-fg-color: hsla(174, 100%, 29%, 1);--md-primary-fg-color--light: hsla(174, 63%, 40%, 1);--md-primary-fg-color--dark: hsla(173, 100%, 24%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 
0.7)}[data-md-color-primary=green]{--md-primary-fg-color: hsla(122, 39%, 49%, 1);--md-primary-fg-color--light: hsla(123, 38%, 57%, 1);--md-primary-fg-color--dark: hsla(123, 43%, 39%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=light-green]{--md-primary-fg-color: hsla(88, 50%, 53%, 1);--md-primary-fg-color--light: hsla(88, 50%, 60%, 1);--md-primary-fg-color--dark: hsla(92, 48%, 42%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=lime]{--md-primary-fg-color: hsla(66, 70%, 54%, 1);--md-primary-fg-color--light: hsla(66, 70%, 61%, 1);--md-primary-fg-color--dark: hsla(62, 61%, 44%, 1);--md-primary-bg-color: hsla(0, 0%, 0%, 0.87);--md-primary-bg-color--light: hsla(0, 0%, 0%, 0.54)}[data-md-color-primary=yellow]{--md-primary-fg-color: hsla(54, 100%, 62%, 1);--md-primary-fg-color--light: hsla(54, 100%, 67%, 1);--md-primary-fg-color--dark: hsla(43, 96%, 58%, 1);--md-primary-bg-color: hsla(0, 0%, 0%, 0.87);--md-primary-bg-color--light: hsla(0, 0%, 0%, 0.54)}[data-md-color-primary=amber]{--md-primary-fg-color: hsla(45, 100%, 51%, 1);--md-primary-fg-color--light: hsla(45, 100%, 58%, 1);--md-primary-fg-color--dark: hsla(38, 100%, 50%, 1);--md-primary-bg-color: hsla(0, 0%, 0%, 0.87);--md-primary-bg-color--light: hsla(0, 0%, 0%, 0.54)}[data-md-color-primary=orange]{--md-primary-fg-color: hsla(36, 100%, 57%, 1);--md-primary-fg-color--light: hsla(36, 100%, 57%, 1);--md-primary-fg-color--dark: hsla(33, 100%, 49%, 1);--md-primary-bg-color: hsla(0, 0%, 0%, 0.87);--md-primary-bg-color--light: hsla(0, 0%, 0%, 0.54)}[data-md-color-primary=deep-orange]{--md-primary-fg-color: hsla(14, 100%, 63%, 1);--md-primary-fg-color--light: hsla(14, 100%, 70%, 1);--md-primary-fg-color--dark: hsla(14, 91%, 54%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=brown]{--md-primary-fg-color: hsla(16, 25%, 38%, 1);--md-primary-fg-color--light: hsla(16, 18%, 47%, 1);--md-primary-fg-color--dark: hsla(14, 26%, 29%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=grey]{--md-primary-fg-color: hsla(0, 0%, 46%, 1);--md-primary-fg-color--light: hsla(0, 0%, 62%, 1);--md-primary-fg-color--dark: hsla(0, 0%, 38%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=blue-grey]{--md-primary-fg-color: hsla(199, 18%, 40%, 1);--md-primary-fg-color--light: hsla(200, 18%, 46%, 1);--md-primary-fg-color--dark: hsla(199, 18%, 33%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7)}[data-md-color-primary=white]{--md-primary-fg-color: hsla(0, 0%, 100%, 1);--md-primary-fg-color--light: hsla(0, 0%, 100%, 0.7);--md-primary-fg-color--dark: hsla(0, 0%, 0%, 0.07);--md-primary-bg-color: hsla(0, 0%, 0%, 0.87);--md-primary-bg-color--light: hsla(0, 0%, 0%, 0.54);--md-typeset-a-color: hsla(231, 48%, 48%, 1)}@media screen and (min-width: 60em){[data-md-color-primary=white] .md-search__input{background-color:rgba(0,0,0,.07)}[data-md-color-primary=white] .md-search__input+.md-search__icon{color:rgba(0,0,0,.87)}[data-md-color-primary=white] .md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] 
.md-search__input::-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input::placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input:hover{background-color:rgba(0,0,0,.32)}}@media screen and (min-width: 76.25em){[data-md-color-primary=white] .md-tabs{border-bottom:.05rem solid rgba(0,0,0,.07)}}[data-md-color-primary=black]{--md-primary-fg-color: hsla(0, 0%, 0%, 1);--md-primary-fg-color--light: hsla(0, 0%, 0%, 0.54);--md-primary-fg-color--dark: hsla(0, 0%, 0%, 1);--md-primary-bg-color: hsla(0, 0%, 100%, 1);--md-primary-bg-color--light: hsla(0, 0%, 100%, 0.7);--md-typeset-a-color: hsla(231, 48%, 48%, 1)}[data-md-color-primary=black] .md-header{background-color:#000}@media screen and (max-width: 59.9375em){[data-md-color-primary=black] .md-nav__source{background-color:rgba(0,0,0,.87)}}@media screen and (min-width: 60em){[data-md-color-primary=black] .md-search__input{background-color:rgba(255,255,255,.12)}[data-md-color-primary=black] .md-search__input:hover{background-color:rgba(255,255,255,.3)}}@media screen and (max-width: 76.1875em){html [data-md-color-primary=black] .md-nav--primary .md-nav__title[for=__drawer]{background-color:#000}}@media screen and (min-width: 76.25em){[data-md-color-primary=black] .md-tabs{background-color:#000}}@media screen{[data-md-color-scheme=slate]{--md-hue: 232;--md-default-fg-color: hsla(var(--md-hue), 75%, 95%, 1);--md-default-fg-color--light: hsla(var(--md-hue), 75%, 90%, 0.62);--md-default-fg-color--lighter: hsla(var(--md-hue), 75%, 90%, 0.32);--md-default-fg-color--lightest: hsla(var(--md-hue), 75%, 90%, 0.12);--md-default-bg-color: hsla(var(--md-hue), 15%, 21%, 1);--md-default-bg-color--light: hsla(var(--md-hue), 15%, 21%, 0.54);--md-default-bg-color--lighter: hsla(var(--md-hue), 15%, 21%, 0.26);--md-default-bg-color--lightest: hsla(var(--md-hue), 15%, 21%, 0.07);--md-code-fg-color: hsla(var(--md-hue), 18%, 86%, 1);--md-code-bg-color: hsla(var(--md-hue), 15%, 15%, 1);--md-code-hl-color: hsla(218, 100%, 63%, 0.15);--md-code-hl-number-color: hsla(6, 74%, 63%, 1);--md-code-hl-special-color: hsla(340, 83%, 66%, 1);--md-code-hl-function-color: hsla(291, 57%, 65%, 1);--md-code-hl-constant-color: hsla(250, 62%, 70%, 1);--md-code-hl-keyword-color: hsla(219, 66%, 64%, 1);--md-code-hl-string-color: hsla(150, 58%, 44%, 1);--md-typeset-a-color: var(--md-primary-fg-color--light);--md-typeset-mark-color: hsla(218, 100%, 63%, 0.3);--md-typeset-kbd-color: hsla(var(--md-hue), 15%, 94%, 0.12);--md-typeset-kbd-accent-color: hsla(var(--md-hue), 15%, 94%, 0.2);--md-typeset-kbd-border-color: hsla(var(--md-hue), 15%, 14%, 1);--md-admonition-bg-color: hsla(var(--md-hue), 0%, 100%, 0.025);--md-footer-bg-color: hsla(var(--md-hue), 15%, 12%, 0.87);--md-footer-bg-color--dark: hsla(var(--md-hue), 15%, 10%, 1)}[data-md-color-scheme=slate][data-md-color-primary=black],[data-md-color-scheme=slate][data-md-color-primary=white]{--md-typeset-a-color: hsla(231, 44%, 56%, 1)}} diff --git a/0.2/assets/stylesheets/palette.de2705de.min.css b/0.2/assets/stylesheets/palette.de2705de.min.css new file mode 100644 index 00000000..e37181b9 --- /dev/null +++ b/0.2/assets/stylesheets/palette.de2705de.min.css @@ -0,0 +1 @@ 
+[data-md-color-accent=red]{--md-accent-fg-color:#ff1a47;--md-accent-fg-color--transparent:rgba(255,26,71,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=pink]{--md-accent-fg-color:#f50056;--md-accent-fg-color--transparent:rgba(245,0,86,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=purple]{--md-accent-fg-color:#df41fb;--md-accent-fg-color--transparent:rgba(223,65,251,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=deep-purple]{--md-accent-fg-color:#7c4dff;--md-accent-fg-color--transparent:rgba(124,77,255,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=indigo]{--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:rgba(83,108,254,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=blue]{--md-accent-fg-color:#4287ff;--md-accent-fg-color--transparent:rgba(66,136,255,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=light-blue]{--md-accent-fg-color:#0091eb;--md-accent-fg-color--transparent:rgba(0,145,235,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=cyan]{--md-accent-fg-color:#00bad6;--md-accent-fg-color--transparent:rgba(0,186,214,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=teal]{--md-accent-fg-color:#00bda4;--md-accent-fg-color--transparent:rgba(0,189,164,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=green]{--md-accent-fg-color:#00c753;--md-accent-fg-color--transparent:rgba(0,199,83,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=light-green]{--md-accent-fg-color:#63de17;--md-accent-fg-color--transparent:rgba(99,222,23,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=lime]{--md-accent-fg-color:#b0eb00;--md-accent-fg-color--transparent:rgba(176,235,0,0.1);--md-accent-bg-color:rgba(0,0,0,0.87);--md-accent-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-accent=yellow]{--md-accent-fg-color:#ffd500;--md-accent-fg-color--transparent:rgba(255,213,0,0.1);--md-accent-bg-color:rgba(0,0,0,0.87);--md-accent-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-accent=amber]{--md-accent-fg-color:#fa0;--md-accent-fg-color--transparent:rgba(255,170,0,0.1);--md-accent-bg-color:rgba(0,0,0,0.87);--md-accent-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-accent=orange]{--md-accent-fg-color:#ff9100;--md-accent-fg-color--transparent:rgba(255,145,0,0.1);--md-accent-bg-color:rgba(0,0,0,0.87);--md-accent-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-accent=deep-orange]{--md-accent-fg-color:#ff6e42;--md-accent-fg-color--transparent:rgba(255,110,66,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=red]{--md-primary-fg-color:#ef5552;--md-primary-fg-color--light:#e57171;--md-primary-fg-color--dark:#e53734;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=pink]{--md-primary-fg-color:#e92063;--md-primary-fg-color--light:#ec417a;--md-primary-fg-color--dark:#c3185d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=purple]{--md-primary-fg-color:#ab47bd;--md-primary-fg-color--light:#bb69c9;--md-prim
ary-fg-color--dark:#8c24a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=deep-purple]{--md-primary-fg-color:#7e56c2;--md-primary-fg-color--light:#9574cd;--md-primary-fg-color--dark:#673ab6;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=indigo]{--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#303fa1;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=blue]{--md-primary-fg-color:#2094f3;--md-primary-fg-color--light:#42a5f5;--md-primary-fg-color--dark:#1975d2;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=light-blue]{--md-primary-fg-color:#02a6f2;--md-primary-fg-color--light:#28b5f6;--md-primary-fg-color--dark:#0287cf;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=cyan]{--md-primary-fg-color:#00bdd6;--md-primary-fg-color--light:#25c5da;--md-primary-fg-color--dark:#0097a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=teal]{--md-primary-fg-color:#009485;--md-primary-fg-color--light:#26a699;--md-primary-fg-color--dark:#007a6c;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=green]{--md-primary-fg-color:#4cae4f;--md-primary-fg-color--light:#68bb6c;--md-primary-fg-color--dark:#398e3d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=light-green]{--md-primary-fg-color:#8bc34b;--md-primary-fg-color--light:#9ccc66;--md-primary-fg-color--dark:#689f38;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=lime]{--md-primary-fg-color:#cbdc38;--md-primary-fg-color--light:#d3e156;--md-primary-fg-color--dark:#b0b52c;--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-primary=yellow]{--md-primary-fg-color:#ffec3d;--md-primary-fg-color--light:#ffee57;--md-primary-fg-color--dark:#fbc02d;--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-primary=amber]{--md-primary-fg-color:#ffc105;--md-primary-fg-color--light:#ffc929;--md-primary-fg-color--dark:#ffa200;--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-primary=orange]{--md-primary-fg-color:#ffa724;--md-primary-fg-color--light:#ffa724;--md-primary-fg-color--dark:#fa8900;--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-primary=deep-orange]{--md-primary-fg-color:#ff6e42;--md-primary-fg-color--light:#ff8a66;--md-primary-fg-color--dark:#f4511f;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=brown]{--md-primary-fg-color:#795649;--md-primary-fg-color--light:#8d6e62;--md-primary-fg-color--dark:#5d4037;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=grey]{--md-primary-fg-color:#757575;--md-primary-fg-color--light:#9e9e9e;--md-primary-fg-color--dark:#616161;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=blue-grey]{--md-primary-fg-color:#546d78;--md-primary-fg-color--light:#607c8a;--md-primary-fg-color--dark:#455a63;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=white]{--md-primary-fg-c
olor:#fff;--md-primary-fg-color--light:hsla(0,0%,100%,0.7);--md-primary-fg-color--dark:rgba(0,0,0,0.07);--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54);--md-typeset-a-color:#4051b5}@media screen and (min-width:60em){[data-md-color-primary=white] .md-search__form{background-color:rgba(0,0,0,.07)}[data-md-color-primary=white] .md-search__form:hover{background-color:rgba(0,0,0,.32)}[data-md-color-primary=white] .md-search__input+.md-search__icon{color:rgba(0,0,0,.87)}}@media screen and (min-width:76.25em){[data-md-color-primary=white] .md-tabs{border-bottom:.05rem solid rgba(0,0,0,.07)}}[data-md-color-primary=black]{--md-primary-fg-color:#000;--md-primary-fg-color--light:rgba(0,0,0,0.54);--md-primary-fg-color--dark:#000;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7);--md-typeset-a-color:#4051b5}[data-md-color-primary=black] .md-header{background-color:#000}@media screen and (max-width:59.9375em){[data-md-color-primary=black] .md-nav__source{background-color:rgba(0,0,0,.87)}}@media screen and (min-width:60em){[data-md-color-primary=black] .md-search__form{background-color:hsla(0,0%,100%,.12)}[data-md-color-primary=black] .md-search__form:hover{background-color:hsla(0,0%,100%,.3)}}@media screen and (max-width:76.1875em){html [data-md-color-primary=black] .md-nav--primary .md-nav__title[for=__drawer]{background-color:#000}}@media screen and (min-width:76.25em){[data-md-color-primary=black] .md-tabs{background-color:#000}}@media screen{[data-md-color-scheme=slate]{--md-hue:232;--md-default-fg-color:hsla(var(--md-hue),75%,95%,1);--md-default-fg-color--light:hsla(var(--md-hue),75%,90%,0.62);--md-default-fg-color--lighter:hsla(var(--md-hue),75%,90%,0.32);--md-default-fg-color--lightest:hsla(var(--md-hue),75%,90%,0.12);--md-default-bg-color:hsla(var(--md-hue),15%,21%,1);--md-default-bg-color--light:hsla(var(--md-hue),15%,21%,0.54);--md-default-bg-color--lighter:hsla(var(--md-hue),15%,21%,0.26);--md-default-bg-color--lightest:hsla(var(--md-hue),15%,21%,0.07);--md-code-fg-color:hsla(var(--md-hue),18%,86%,1);--md-code-bg-color:hsla(var(--md-hue),15%,15%,1);--md-code-hl-color:rgba(66,136,255,0.15);--md-code-hl-number-color:#e6695b;--md-code-hl-special-color:#f06090;--md-code-hl-function-color:#c973d9;--md-code-hl-constant-color:#9383e2;--md-code-hl-keyword-color:#6791e0;--md-code-hl-string-color:#2fb170;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-default-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color--light);--md-typeset-mark-color:rgba(66,136,255,0.3);--md-typeset-kbd-color:hsla(var(--md-hue),15%,94%,0.12);--md-typeset-kbd-accent-color:hsla(var(--md-hue),15%,94%,0.2);--md-typeset-kbd-border-color:hsla(var(--md-hue),15%,14%,1);--md-admonition-bg-color:hsla(var(--md-hue),0%,100%,0.025);--md-footer-bg-color:hsla(var(--md-hue),15%,12%,0.87);--md-footer-bg-color--dark:hsla(var(--md-hue),15%,10%,1)}[data-md-color-scheme=slate][data-md-color-primary=black],[data-md-color-scheme=slate][data-md-color-primary=white]{--md-typeset-a-color:#5d6cc0}} \ No newline at end of file diff --git a/0.2/change-log/index.html b/0.2/change-log/index.html index c32dc0f5..4d7fe88f 100644 --- a/0.2/change-log/index.html 
+++ b/0.2/change-log/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/citation/index.html b/0.2/citation/index.html index c6651bcd..d879b6e9 100644 --- a/0.2/citation/index.html +++ b/0.2/citation/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/code_of_conduct/index.html b/0.2/code_of_conduct/index.html index 9ae55900..28c81ca2 100644 --- a/0.2/code_of_conduct/index.html +++ b/0.2/code_of_conduct/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ +
+ +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/developers/documentation/index.html b/0.2/developers/documentation/index.html index e664be34..689b2c20 100644 --- a/0.2/developers/documentation/index.html +++ b/0.2/developers/documentation/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ +
+ +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/developers/git-flow/index.html b/0.2/developers/git-flow/index.html index 11421fff..34a0f554 100644 --- a/0.2/developers/git-flow/index.html +++ b/0.2/developers/git-flow/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/developers/remote-support/index.html b/0.2/developers/remote-support/index.html index ae6c70e5..87a16570 100644 --- a/0.2/developers/remote-support/index.html +++ b/0.2/developers/remote-support/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/developers/test-cases/index.html b/0.2/developers/test-cases/index.html index f511c774..acca69bd 100644 --- a/0.2/developers/test-cases/index.html +++ b/0.2/developers/test-cases/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/developers/testing/index.html b/0.2/developers/testing/index.html index f5612999..227dbc36 100644 --- a/0.2/developers/testing/index.html +++ b/0.2/developers/testing/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/developers/virtual-environments/index.html b/0.2/developers/virtual-environments/index.html index c214dece..a44c970d 100644 --- a/0.2/developers/virtual-environments/index.html +++ b/0.2/developers/virtual-environments/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/faq/index.html b/0.2/faq/index.html index 41c232a9..67ccb341 100644 --- a/0.2/faq/index.html +++ b/0.2/faq/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/add-new-features/index.html b/0.2/features/add-new-features/index.html index 19095a9d..99c52076 100644 --- a/0.2/features/add-new-features/index.html +++ b/0.2/features/add-new-features/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/feature-introduction/index.html b/0.2/features/feature-introduction/index.html index 220aa34b..8527aa42 100644 --- a/0.2/features/feature-introduction/index.html +++ b/0.2/features/feature-introduction/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/fitbit-heartrate-intraday/index.html b/0.2/features/fitbit-heartrate-intraday/index.html index 5d66fc5c..fbf280c1 100644 --- a/0.2/features/fitbit-heartrate-intraday/index.html +++ b/0.2/features/fitbit-heartrate-intraday/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/fitbit-heartrate-summary/index.html b/0.2/features/fitbit-heartrate-summary/index.html index 422ee80c..8184b9c8 100644 --- a/0.2/features/fitbit-heartrate-summary/index.html +++ b/0.2/features/fitbit-heartrate-summary/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/fitbit-sleep-summary/index.html b/0.2/features/fitbit-sleep-summary/index.html index cfe2a4a1..f959d622 100644 --- a/0.2/features/fitbit-sleep-summary/index.html +++ b/0.2/features/fitbit-sleep-summary/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/fitbit-steps-intraday/index.html b/0.2/features/fitbit-steps-intraday/index.html index a78938a1..8860e076 100644 --- a/0.2/features/fitbit-steps-intraday/index.html +++ b/0.2/features/fitbit-steps-intraday/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/fitbit-steps-summary/index.html b/0.2/features/fitbit-steps-summary/index.html index 3f30ad10..1625c51a 100644 --- a/0.2/features/fitbit-steps-summary/index.html +++ b/0.2/features/fitbit-steps-summary/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-accelerometer/index.html b/0.2/features/phone-accelerometer/index.html index a0f0f2e0..a1dc4ecd 100644 --- a/0.2/features/phone-accelerometer/index.html +++ b/0.2/features/phone-accelerometer/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-activity-recognition/index.html b/0.2/features/phone-activity-recognition/index.html index 3d06d640..880c4e39 100644 --- a/0.2/features/phone-activity-recognition/index.html +++ b/0.2/features/phone-activity-recognition/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-applications-foreground/index.html b/0.2/features/phone-applications-foreground/index.html index 5f538757..f7ff14f2 100644 --- a/0.2/features/phone-applications-foreground/index.html +++ b/0.2/features/phone-applications-foreground/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-battery/index.html b/0.2/features/phone-battery/index.html index a68e5b22..b2c62801 100644 --- a/0.2/features/phone-battery/index.html +++ b/0.2/features/phone-battery/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-bluetooth/index.html b/0.2/features/phone-bluetooth/index.html index 5c0d460a..09653b4f 100644 --- a/0.2/features/phone-bluetooth/index.html +++ b/0.2/features/phone-bluetooth/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-calls/index.html b/0.2/features/phone-calls/index.html index 2cb66022..e62a40a0 100644 --- a/0.2/features/phone-calls/index.html +++ b/0.2/features/phone-calls/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-conversation/index.html b/0.2/features/phone-conversation/index.html index f6724e22..338618ed 100644 --- a/0.2/features/phone-conversation/index.html +++ b/0.2/features/phone-conversation/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-data-yield/index.html b/0.2/features/phone-data-yield/index.html index 6cf88bc9..71165c40 100644 --- a/0.2/features/phone-data-yield/index.html +++ b/0.2/features/phone-data-yield/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-light/index.html b/0.2/features/phone-light/index.html index 6d924788..5a7cd6b9 100644 --- a/0.2/features/phone-light/index.html +++ b/0.2/features/phone-light/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-locations/index.html b/0.2/features/phone-locations/index.html index eea5fb60..28bc8560 100644 --- a/0.2/features/phone-locations/index.html +++ b/0.2/features/phone-locations/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-screen/index.html b/0.2/features/phone-screen/index.html index acb3c681..54ef383d 100644 --- a/0.2/features/phone-screen/index.html +++ b/0.2/features/phone-screen/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@
+ + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-wifi-connected/index.html b/0.2/features/phone-wifi-connected/index.html index a513879c..169020a2 100644 --- a/0.2/features/phone-wifi-connected/index.html +++ b/0.2/features/phone-wifi-connected/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@
+ + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/features/phone-wifi-visible/index.html b/0.2/features/phone-wifi-visible/index.html index 7fb79c45..f02d00ed 100644 --- a/0.2/features/phone-wifi-visible/index.html +++ b/0.2/features/phone-wifi-visible/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@
+ + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/file-structure/index.html b/0.2/file-structure/index.html index b31328c0..679883b4 100644 --- a/0.2/file-structure/index.html +++ b/0.2/file-structure/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@
+ + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/index.html b/0.2/index.html index 4b13376f..7a8b7707 100644 --- a/0.2/index.html +++ b/0.2/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@
+ + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/javascripts/extra.js b/0.2/javascripts/extra.js index b6d5686d..e69de29b 100644 --- a/0.2/javascripts/extra.js +++ b/0.2/javascripts/extra.js @@ -1,14 +0,0 @@ -window.addEventListener("DOMContentLoaded", function() { - var xhr = new XMLHttpRequest(); - xhr.open("GET", window.location.origin + "/versions.json"); - xhr.onload = function() { - var versions = JSON.parse(this.responseText); - latest_version = "" - for(id in versions) - if(versions[id]["aliases"].length > 0 && versions[id]["aliases"].includes("latest")) - latest_version = "/" + versions[id].version + "/" - if(!window.location.pathname.includes("/latest/") && (latest_version.length > 0 && !window.location.pathname.includes(latest_version))) - document.querySelector("div[data-md-component=announce]").innerHTML = "
You are seeing the docs for a previous version of RAPIDS, click here to go to the latest
" - }; - xhr.send(); - }); diff --git a/0.2/migrating-from-old-versions/index.html b/0.2/migrating-from-old-versions/index.html index b341b6ac..2dcfdc8a 100644 --- a/0.2/migrating-from-old-versions/index.html +++ b/0.2/migrating-from-old-versions/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@
+ + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/overrides/main.html b/0.2/overrides/main.html new file mode 100644 index 00000000..cd72f495 --- /dev/null +++ b/0.2/overrides/main.html @@ -0,0 +1,8 @@ +{% extends "base.html" %} + +{% block outdated %} + You're not viewing the latest stable version of RAPIDS. + + Click here to go to latest. + +{% endblock %} \ No newline at end of file diff --git a/0.2/search/search_index.json b/0.2/search/search_index.json index 1b025ff8..598df7cd 100644 --- a/0.2/search/search_index.json +++ b/0.2/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Welcome to RAPIDS documentation \u00b6 Reproducible Analysis Pipeline for Data Streams (RAPIDS) allows you to process smartphone and wearable data to extract and create behavioral features (a.k.a. digital biomarkers), visualize mobile sensor data and structure your analysis into reproducible workflows. RAPIDS is open source, documented, modular, tested, and reproducible. At the moment we support smartphone data collected with AWARE and wearable data from Fitbit devices. Tip Questions or feedback can be posted on the #rapids channel in AWARE Framework's slack . Bugs and feature requests should be posted on Github . Join our discussions on our algorithms and assumptions for feature processing . Ready to start? Go to Installation , then to Configuration , and then to Execution How does it work? \u00b6 RAPIDS is formed by R and Python scripts orchestrated by Snakemake . We suggest you read Snakemake\u2019s docs but in short: every link in the analysis chain is atomic and has files as input and output. Behavioral features are processed per sensor and per participant. What are the benefits of using RAPIDS? \u00b6 Consistent analysis . Every participant sensor dataset is analyzed in the exact same way and isolated from each other. Efficient analysis . Every analysis step is executed only once. Whenever your data or configuration changes only the affected files are updated. Parallel execution . Thanks to Snakemake, your analysis can be executed over multiple cores without changing your code. Code-free features . Extract any of the behavioral features offered by RAPIDS without writing any code. Extensible code . You can easily add your own behavioral features in R or Python, share them with the community, and keep authorship and citations. Timezone aware . Your data is adjusted to the specified timezone (multiple timezones suport coming soon ). Flexible time segments . You can extract behavioral features on time windows of any length (e.g. 5 minutes, 3 hours, 2 days), on every day or particular days (e.g. weekends, Mondays, the 1 st of each month, etc.) or around events of interest (e.g. surveys or clinical relapses). Tested code . We are constantly adding tests to make sure our behavioral features are correct. Reproducible code . If you structure your analysis within RAPIDS, you can be sure your code will run in other computers as intended thanks to R and Python virtual environments. You can share your analysis code along your publications without any overhead. Private . All your data is processed locally. How is it organized? \u00b6 In broad terms the config.yaml , .env file , participants files , and time segment files are the only ones that you will have to modify. All data is stored in data/ and all scripts are stored in src/ . 
For more information see RAPIDS\u2019 File Structure .","title":"Home"},{"location":"#welcome-to-rapids-documentation","text":"Reproducible Analysis Pipeline for Data Streams (RAPIDS) allows you to process smartphone and wearable data to extract and create behavioral features (a.k.a. digital biomarkers), visualize mobile sensor data and structure your analysis into reproducible workflows. RAPIDS is open source, documented, modular, tested, and reproducible. At the moment we support smartphone data collected with AWARE and wearable data from Fitbit devices. Tip Questions or feedback can be posted on the #rapids channel in AWARE Framework's slack . Bugs and feature requests should be posted on Github . Join our discussions on our algorithms and assumptions for feature processing . Ready to start? Go to Installation , then to Configuration , and then to Execution","title":"Welcome to RAPIDS documentation"},{"location":"#how-does-it-work","text":"RAPIDS is formed by R and Python scripts orchestrated by Snakemake . We suggest you read Snakemake\u2019s docs but in short: every link in the analysis chain is atomic and has files as input and output. Behavioral features are processed per sensor and per participant.","title":"How does it work?"},{"location":"#what-are-the-benefits-of-using-rapids","text":"Consistent analysis . Every participant sensor dataset is analyzed in the exact same way and isolated from each other. Efficient analysis . Every analysis step is executed only once. Whenever your data or configuration changes only the affected files are updated. Parallel execution . Thanks to Snakemake, your analysis can be executed over multiple cores without changing your code. Code-free features . Extract any of the behavioral features offered by RAPIDS without writing any code. Extensible code . You can easily add your own behavioral features in R or Python, share them with the community, and keep authorship and citations. Timezone aware . Your data is adjusted to the specified timezone (multiple timezones suport coming soon ). Flexible time segments . You can extract behavioral features on time windows of any length (e.g. 5 minutes, 3 hours, 2 days), on every day or particular days (e.g. weekends, Mondays, the 1 st of each month, etc.) or around events of interest (e.g. surveys or clinical relapses). Tested code . We are constantly adding tests to make sure our behavioral features are correct. Reproducible code . If you structure your analysis within RAPIDS, you can be sure your code will run in other computers as intended thanks to R and Python virtual environments. You can share your analysis code along your publications without any overhead. Private . All your data is processed locally.","title":"What are the benefits of using RAPIDS?"},{"location":"#how-is-it-organized","text":"In broad terms the config.yaml , .env file , participants files , and time segment files are the only ones that you will have to modify. All data is stored in data/ and all scripts are stored in src/ . 
For more information see RAPIDS\u2019 File Structure .","title":"How is it organized?"},{"location":"change-log/","text":"Change Log \u00b6 v0.2.3 \u00b6 Fix participant IDS in the example analysis workflow v0.2.2 \u00b6 Fix readme link to docs v0.2.1 \u00b6 FIx link to the most recent version in the old version banner v0.2.0 \u00b6 Add new PHONE_BLUETOOTH DORYAB provider Deprecate PHONE_BLUETOOTH RAPIDS provider Fix bug in filter_data_by_segment for Python when dataset was empty Minor doc updates New FAQ item v0.1.0 \u00b6 New and more consistent docs (this website). The previous docs are marked as beta Consolidate configuration instructions Flexible time segments Simplify Fitbit behavioral feature extraction and documentation Sensor\u2019s configuration and output is more consistent Update visualizations to handle flexible day segments Create a RAPIDS execution script that allows re-computation of the pipeline after configuration changes Add citation guide Update virtual environment guide Update analysis workflow example Add a Code of Conduct Update Team page","title":"Change Log"},{"location":"change-log/#change-log","text":"","title":"Change Log"},{"location":"change-log/#v023","text":"Fix participant IDS in the example analysis workflow","title":"v0.2.3"},{"location":"change-log/#v022","text":"Fix readme link to docs","title":"v0.2.2"},{"location":"change-log/#v021","text":"FIx link to the most recent version in the old version banner","title":"v0.2.1"},{"location":"change-log/#v020","text":"Add new PHONE_BLUETOOTH DORYAB provider Deprecate PHONE_BLUETOOTH RAPIDS provider Fix bug in filter_data_by_segment for Python when dataset was empty Minor doc updates New FAQ item","title":"v0.2.0"},{"location":"change-log/#v010","text":"New and more consistent docs (this website). The previous docs are marked as beta Consolidate configuration instructions Flexible time segments Simplify Fitbit behavioral feature extraction and documentation Sensor\u2019s configuration and output is more consistent Update visualizations to handle flexible day segments Create a RAPIDS execution script that allows re-computation of the pipeline after configuration changes Add citation guide Update virtual environment guide Update analysis workflow example Add a Code of Conduct Update Team page","title":"v0.1.0"},{"location":"citation/","text":"Cite RAPIDS and providers \u00b6 RAPIDS and the community RAPIDS is a community effort and as such we want to continue recognizing the contributions from other researchers. Besides citing RAPIDS, we ask you to cite any of the authors listed below if you used those sensor providers in your analysis, thank you! RAPIDS \u00b6 If you used RAPIDS, please cite this paper . RAPIDS et al. citation Vega J, Li M, Aguillera K, Goel N, Joshi E, Durica KC, Kunta AR, Low CA RAPIDS: Reproducible Analysis Pipeline for Data Streams Collected with Mobile Devices JMIR Preprints. 18/08/2020:23246 DOI: 10.2196/preprints.23246 URL: https://preprints.jmir.org/preprint/23246 Panda (accelerometer) \u00b6 If you computed accelerometer features using the provider [PHONE_ACCLEROMETER][PANDA] cite this paper in addition to RAPIDS. Panda et al. citation Panda N, Solsky I, Huang EJ, Lipsitz S, Pradarelli JC, Delisle M, Cusack JC, Gadd MA, Lubitz CC, Mullen JT, Qadan M, Smith BL, Specht M, Stephen AE, Tanabe KK, Gawande AA, Onnela JP, Haynes AB. Using Smartphones to Capture Novel Recovery Metrics After Cancer Surgery. JAMA Surg. 2020 Feb 1;155(2):123-129. doi: 10.1001/jamasurg.2019.4702. 
PMID: 31657854; PMCID: PMC6820047. Stachl (applications foreground) \u00b6 If you computed applications foreground features using the app category (genre) catalogue in [PHONE_APPLICATIONS_FOREGROUND][RAPIDS] cite this paper in addition to RAPIDS. Stachl et al. citation Clemens Stachl, Quay Au, Ramona Schoedel, Samuel D. Gosling, Gabriella M. Harari, Daniel Buschek, Sarah Theres V\u00f6lkel, Tobias Schuwerk, Michelle Oldemeier, Theresa Ullmann, Heinrich Hussmann, Bernd Bischl, Markus B\u00fchner. Proceedings of the National Academy of Sciences Jul 2020, 117 (30) 17680-17687; DOI: 10.1073/pnas.1920484117 Doryab (bluetooth) \u00b6 If you computed bluetooth features using the provider [PHONE_BLUETOOTH][DORYAB] cite this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Barnett (locations) \u00b6 If you computed locations features using the provider [PHONE_LOCATIONS][BARNETT] cite this paper and this paper in addition to RAPIDS. Barnett et al. citation Ian Barnett, Jukka-Pekka Onnela, Inferring mobility measures from GPS traces with missing data, Biostatistics, Volume 21, Issue 2, April 2020, Pages e98\u2013e112, https://doi.org/10.1093/biostatistics/kxy059 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845 Doryab (locations) \u00b6 If you computed locations features using the provider [PHONE_LOCATIONS][DORYAB] cite this paper and this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845","title":"Citation"},{"location":"citation/#cite-rapids-and-providers","text":"RAPIDS and the community RAPIDS is a community effort and as such we want to continue recognizing the contributions from other researchers. Besides citing RAPIDS, we ask you to cite any of the authors listed below if you used those sensor providers in your analysis, thank you!","title":"Cite RAPIDS and providers"},{"location":"citation/#rapids","text":"If you used RAPIDS, please cite this paper . RAPIDS et al. citation Vega J, Li M, Aguillera K, Goel N, Joshi E, Durica KC, Kunta AR, Low CA RAPIDS: Reproducible Analysis Pipeline for Data Streams Collected with Mobile Devices JMIR Preprints. 18/08/2020:23246 DOI: 10.2196/preprints.23246 URL: https://preprints.jmir.org/preprint/23246","title":"RAPIDS"},{"location":"citation/#panda-accelerometer","text":"If you computed accelerometer features using the provider [PHONE_ACCLEROMETER][PANDA] cite this paper in addition to RAPIDS. Panda et al. 
citation Panda N, Solsky I, Huang EJ, Lipsitz S, Pradarelli JC, Delisle M, Cusack JC, Gadd MA, Lubitz CC, Mullen JT, Qadan M, Smith BL, Specht M, Stephen AE, Tanabe KK, Gawande AA, Onnela JP, Haynes AB. Using Smartphones to Capture Novel Recovery Metrics After Cancer Surgery. JAMA Surg. 2020 Feb 1;155(2):123-129. doi: 10.1001/jamasurg.2019.4702. PMID: 31657854; PMCID: PMC6820047.","title":"Panda (accelerometer)"},{"location":"citation/#stachl-applications-foreground","text":"If you computed applications foreground features using the app category (genre) catalogue in [PHONE_APPLICATIONS_FOREGROUND][RAPIDS] cite this paper in addition to RAPIDS. Stachl et al. citation Clemens Stachl, Quay Au, Ramona Schoedel, Samuel D. Gosling, Gabriella M. Harari, Daniel Buschek, Sarah Theres V\u00f6lkel, Tobias Schuwerk, Michelle Oldemeier, Theresa Ullmann, Heinrich Hussmann, Bernd Bischl, Markus B\u00fchner. Proceedings of the National Academy of Sciences Jul 2020, 117 (30) 17680-17687; DOI: 10.1073/pnas.1920484117","title":"Stachl (applications foreground)"},{"location":"citation/#doryab-bluetooth","text":"If you computed bluetooth features using the provider [PHONE_BLUETOOTH][DORYAB] cite this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394","title":"Doryab (bluetooth)"},{"location":"citation/#barnett-locations","text":"If you computed locations features using the provider [PHONE_LOCATIONS][BARNETT] cite this paper and this paper in addition to RAPIDS. Barnett et al. citation Ian Barnett, Jukka-Pekka Onnela, Inferring mobility measures from GPS traces with missing data, Biostatistics, Volume 21, Issue 2, April 2020, Pages e98\u2013e112, https://doi.org/10.1093/biostatistics/kxy059 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845","title":"Barnett (locations)"},{"location":"citation/#doryab-locations","text":"If you computed locations features using the provider [PHONE_LOCATIONS][DORYAB] cite this paper and this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. 
DOI: https://doi.org/10.1145/2750858.2805845","title":"Doryab (locations)"},{"location":"code_of_conduct/","text":"Contributor Covenant Code of Conduct \u00b6 Our Pledge \u00b6 We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. Our Standards \u00b6 Examples of behavior that contributes to a positive environment for our community include: Demonstrating empathy and kindness toward other people Being respectful of differing opinions, viewpoints, and experiences Giving and gracefully accepting constructive feedback Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: The use of sexualized language or imagery, and sexual attention or advances of any kind Trolling, insulting or derogatory comments, and personal or political attacks Public or private harassment Publishing others\u2019 private information, such as a physical or email address, without their explicit permission Other conduct which could reasonably be considered inappropriate in a professional setting Enforcement Responsibilities \u00b6 Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. Scope \u00b6 This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Enforcement \u00b6 Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at moshi@pitt.edu . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. Enforcement Guidelines \u00b6 Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 1. Correction \u00b6 Community Impact : Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. Consequence : A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 2. Warning \u00b6 Community Impact : A violation through a single incident or series of actions. 
Consequence : A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 3. Temporary Ban \u00b6 Community Impact : A serious violation of community standards, including sustained inappropriate behavior. Consequence : A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 4. Permanent Ban \u00b6 Community Impact : Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. Consequence : A permanent ban from any sort of public interaction within the community. Attribution \u00b6 This Code of Conduct is adapted from the Contributor Covenant , version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html . Community Impact Guidelines were inspired by Mozilla\u2019s code of conduct enforcement ladder . For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq . Translations are available at https://www.contributor-covenant.org/translations .","title":"Code of Conduct"},{"location":"code_of_conduct/#contributor-covenant-code-of-conduct","text":"","title":"Contributor Covenant Code of Conduct"},{"location":"code_of_conduct/#our-pledge","text":"We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.","title":"Our Pledge"},{"location":"code_of_conduct/#our-standards","text":"Examples of behavior that contributes to a positive environment for our community include: Demonstrating empathy and kindness toward other people Being respectful of differing opinions, viewpoints, and experiences Giving and gracefully accepting constructive feedback Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: The use of sexualized language or imagery, and sexual attention or advances of any kind Trolling, insulting or derogatory comments, and personal or political attacks Public or private harassment Publishing others\u2019 private information, such as a physical or email address, without their explicit permission Other conduct which could reasonably be considered inappropriate in a professional setting","title":"Our Standards"},{"location":"code_of_conduct/#enforcement-responsibilities","text":"Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.","title":"Enforcement Responsibilities"},{"location":"code_of_conduct/#scope","text":"This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.","title":"Scope"},{"location":"code_of_conduct/#enforcement","text":"Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at moshi@pitt.edu . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident.","title":"Enforcement"},{"location":"code_of_conduct/#enforcement-guidelines","text":"Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:","title":"Enforcement Guidelines"},{"location":"code_of_conduct/#1-correction","text":"Community Impact : Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. Consequence : A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.","title":"1. Correction"},{"location":"code_of_conduct/#2-warning","text":"Community Impact : A violation through a single incident or series of actions. Consequence : A warning with consequences for continued behavior. 
No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.","title":"2. Warning"},{"location":"code_of_conduct/#3-temporary-ban","text":"Community Impact : A serious violation of community standards, including sustained inappropriate behavior. Consequence : A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.","title":"3. Temporary Ban"},{"location":"code_of_conduct/#4-permanent-ban","text":"Community Impact : Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. Consequence : A permanent ban from any sort of public interaction within the community.","title":"4. Permanent Ban"},{"location":"code_of_conduct/#attribution","text":"This Code of Conduct is adapted from the Contributor Covenant , version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html . Community Impact Guidelines were inspired by Mozilla\u2019s code of conduct enforcement ladder . For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq . Translations are available at https://www.contributor-covenant.org/translations .","title":"Attribution"},{"location":"faq/","text":"Frequently Asked Questions \u00b6 Cannot connect to your MySQL server \u00b6 Problem **Error in .local ( drv, \\. .. ) :** **Failed to connect to database: Error: Can \\' t initialize character set unknown ( path: compiled \\_ in ) ** : Calls: dbConnect -> dbConnect -> .local -> .Call Execution halted [ Tue Mar 10 19 :40:15 2020 ] Error in rule download_dataset: jobid: 531 output: data/raw/p60/locations_raw.csv RuleException: CalledProcessError in line 20 of /home/ubuntu/rapids/rules/preprocessing.snakefile: Command 'set -euo pipefail; Rscript --vanilla /home/ubuntu/rapids/.snakemake/scripts/tmp_2jnvqs7.download_dataset.R' returned non-zero exit status 1 . File \"/home/ubuntu/rapids/rules/preprocessing.snakefile\" , line 20 , in __rule_download_dataset File \"/home/ubuntu/anaconda3/envs/moshi-env/lib/python3.7/concurrent/futures/thread.py\" , line 57 , in run Shutting down, this might take some time. Exiting because a job execution failed. Look above for error message Solution Please make sure the DATABASE_GROUP in config.yaml matches your DB credentials group in .env . Cannot start mysql in linux via brew services start mysql \u00b6 Problem Cannot start mysql in linux via brew services start mysql Solution Use mysql.server start Every time I run force the download_dataset rule all rules are executed \u00b6 Problem When running snakemake -j1 -R download_phone_data or ./rapids -j1 -R download_phone_data all the rules and files are re-computed Solution This is expected behavior. The advantage of using snakemake under the hood is that every time a file containing data is modified every rule that depends on that file will be re-executed to update their results. 
In this case, since download_dataset updates all the raw data, and you are forcing the rule with the flag -R every single rule that depends on those raw files will be executed. Error Table XXX doesn't exist while running the download_phone_data or download_fitbit_data rule. \u00b6 Problem Error in .local ( conn, statement, ... ) : could not run statement: Table 'db_name.table_name' doesn ' t exist Calls: colnames ... .local -> dbSendQuery -> dbSendQuery -> .local -> .Call Execution halted Solution Please make sure the sensors listed in [PHONE_VALID_SENSED_BINS][PHONE_SENSORS] and the [TABLE] of each sensor you activated in config.yaml match your database tables. How do I install RAPIDS on Ubuntu 16.04 \u00b6 Solution Install dependencies (Homebrew - if not installed): sudo apt-get install libmariadb-client-lgpl-dev libxml2-dev libssl-dev Install brew for linux and add the following line to ~/.bashrc : export PATH=$HOME/.linuxbrew/bin:$PATH source ~/.bashrc Install MySQL brew install mysql brew services start mysql Install R, pandoc and rmarkdown: brew install r brew install gcc@6 (needed due to this bug ) HOMEBREW_CC=gcc-6 brew install pandoc Install miniconda using these instructions Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake renv_install snakemake renv_init snakemake renv_restore This step could take several minutes to complete. Please be patient and let it run until completion. mysql.h cannot be found \u00b6 Problem -------------------------- [ ERROR MESSAGE ] ---------------------------- :1:10: fatal error: mysql.h: No such file or directory compilation terminated. ----------------------------------------------------------------------- ERROR: configuration failed for package 'RMySQL' Solution sudo apt install libmariadbclient-dev No package libcurl found \u00b6 Problem libcurl cannot be found Solution Install libcurl sudo apt install libcurl4-openssl-dev Configuration failed because openssl was not found. \u00b6 Problem openssl cannot be found Solution Install openssl sudo apt install libssl-dev Configuration failed because libxml-2.0 was not found \u00b6 Problem libxml-2.0 cannot be found Solution Install libxml-2.0 sudo apt install libxml2-dev SSL connection error when running RAPIDS \u00b6 Problem You are getting the following error message when running RAPIDS: Error: Failed to connect: SSL connection error: error:1425F102:SSL routines:ssl_choose_client_version:unsupported protocol. Solution This is a bug in Ubuntu 20.04 when trying to connect to an old MySQL server with MySQL client 8.0. You should get the same error message if you try to connect from the command line. There you can add the option --ssl-mode=DISABLED but we can't do this from the R connector. If you can't update your server, the quickest solution would be to import your database to another server or to a local environment. Alternatively, you could replace mysql-client and libmysqlclient-dev with mariadb-client and libmariadbclient-dev and reinstall renv. More info about this issue here DB_TABLES key not found \u00b6 Problem If you get the following error KeyError in line 43 of preprocessing.smk: 'PHONE_SENSORS' , it means that the indentation of the key [PHONE_SENSORS] is not matching the other child elements of PHONE_VALID_SENSED_BINS Solution You need to add or remove any leading whitespaces as needed on that line. 
PHONE_VALID_SENSED_BINS : COMPUTE : False # This flag is automatically ignored (set to True) if you are extracting PHONE_VALID_SENSED_DAYS or screen or Barnett's location features BIN_SIZE : &bin_size 5 # (in minutes) PHONE_SENSORS : [] Error while updating your conda environment in Ubuntu \u00b6 Problem You get the following error: CondaMultiError: CondaVerificationError: The package for tk located at /home/ubuntu/miniconda2/pkgs/tk-8.6.9-hed695b0_1003 appears to be corrupted. The path 'include/mysqlStubs.h' specified in the package manifest cannot be found. ClobberError: This transaction has incompatible packages due to a shared path. packages: conda-forge/linux-64::llvm-openmp-10.0.0-hc9558a2_0, anaconda/linux-64::intel-openmp-2019.4-243 path: 'lib/libiomp5.so' Solution Reinstall conda Embedded nul in string \u00b6 Problem You get the following error when downloading sensor data: Error in result_fetch ( res@ptr, n = n ) : embedded nul in string: Solution This problem is due to the way RMariaDB handles a mismatch between data types in R and MySQL (see this issue ). Since it seems this problem won\u2019t be handled by RMariaDB , you have two options: If it\u2019s only a few rows that are causing this problem, remove the null character from the offending table cell. If it\u2019s not feasible to modify your data you can try swapping RMariaDB with RMySQL . Just keep in mind you might have problems connecting to modern MySQL servers running on Linux: Add RMySQL to the renv environment by running the following command in a terminal opened in the RAPIDS root folder R -e 'renv::install(\"RMySQL\")' Go to src/data/download_phone_data.R and replace library(RMariaDB) with library(RMySQL) In the same file replace dbEngine <- dbConnect(MariaDB(), default.file = \"./.env\", group = group) with dbEngine <- dbConnect(MySQL(), default.file = \"./.env\", group = group)","title":"FAQ"},{"location":"faq/#frequently-asked-questions","text":"","title":"Frequently Asked Questions"},{"location":"faq/#cannot-connect-to-your-mysql-server","text":"Problem **Error in .local ( drv, \\. .. ) :** **Failed to connect to database: Error: Can \\' t initialize character set unknown ( path: compiled \\_ in ) ** : Calls: dbConnect -> dbConnect -> .local -> .Call Execution halted [ Tue Mar 10 19 :40:15 2020 ] Error in rule download_dataset: jobid: 531 output: data/raw/p60/locations_raw.csv RuleException: CalledProcessError in line 20 of /home/ubuntu/rapids/rules/preprocessing.snakefile: Command 'set -euo pipefail; Rscript --vanilla /home/ubuntu/rapids/.snakemake/scripts/tmp_2jnvqs7.download_dataset.R' returned non-zero exit status 1 . File \"/home/ubuntu/rapids/rules/preprocessing.snakefile\" , line 20 , in __rule_download_dataset File \"/home/ubuntu/anaconda3/envs/moshi-env/lib/python3.7/concurrent/futures/thread.py\" , line 57 , in run Shutting down, this might take some time. Exiting because a job execution failed.
Look above for error message Solution Please make sure the DATABASE_GROUP in config.yaml matches your DB credentials group in .env .","title":"Cannot connect to your MySQL server"},{"location":"faq/#cannot-start-mysql-in-linux-via-brew-services-start-mysql","text":"Problem Cannot start mysql in linux via brew services start mysql Solution Use mysql.server start","title":"Cannot start mysql in linux via brew services start mysql"},{"location":"faq/#every-time-i-run-force-the-download_dataset-rule-all-rules-are-executed","text":"Problem When running snakemake -j1 -R download_phone_data or ./rapids -j1 -R download_phone_data all the rules and files are re-computed Solution This is expected behavior. The advantage of using snakemake under the hood is that every time a file containing data is modified every rule that depends on that file will be re-executed to update their results. In this case, since download_dataset updates all the raw data, and you are forcing the rule with the flag -R every single rule that depends on those raw files will be executed.","title":"Every time I run force the download_dataset rule all rules are executed"},{"location":"faq/#error-table-xxx-doesnt-exist-while-running-the-download_phone_data-or-download_fitbit_data-rule","text":"Problem Error in .local ( conn, statement, ... ) : could not run statement: Table 'db_name.table_name' doesn ' t exist Calls: colnames ... .local -> dbSendQuery -> dbSendQuery -> .local -> .Call Execution halted Solution Please make sure the sensors listed in [PHONE_VALID_SENSED_BINS][PHONE_SENSORS] and the [TABLE] of each sensor you activated in config.yaml match your database tables.","title":"Error Table XXX doesn't exist while running the download_phone_data or download_fitbit_data rule."},{"location":"faq/#how-do-i-install-rapids-on-ubuntu-1604","text":"Solution Install dependencies (Homebrew - if not installed): sudo apt-get install libmariadb-client-lgpl-dev libxml2-dev libssl-dev Install brew for linux and add the following line to ~/.bashrc : export PATH=$HOME/.linuxbrew/bin:$PATH source ~/.bashrc Install MySQL brew install mysql brew services start mysql Install R, pandoc and rmarkdown: brew install r brew install gcc@6 (needed due to this bug ) HOMEBREW_CC=gcc-6 brew install pandoc Install miniconda using these instructions Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake renv_install snakemake renv_init snakemake renv_restore This step could take several minutes to complete. Please be patient and let it run until completion.","title":"How do I install RAPIDS on Ubuntu 16.04"},{"location":"faq/#mysqlh-cannot-be-found","text":"Problem -------------------------- [ ERROR MESSAGE ] ---------------------------- :1:10: fatal error: mysql.h: No such file or directory compilation terminated. 
----------------------------------------------------------------------- ERROR: configuration failed for package 'RMySQL' Solution sudo apt install libmariadbclient-dev","title":"mysql.h cannot be found"},{"location":"faq/#no-package-libcurl-found","text":"Problem libcurl cannot be found Solution Install libcurl sudo apt install libcurl4-openssl-dev","title":"No package libcurl found"},{"location":"faq/#configuration-failed-because-openssl-was-not-found","text":"Problem openssl cannot be found Solution Install openssl sudo apt install libssl-dev","title":"Configuration failed because openssl was not found."},{"location":"faq/#configuration-failed-because-libxml-20-was-not-found","text":"Problem libxml-2.0 cannot be found Solution Install libxml-2.0 sudo apt install libxml2-dev","title":"Configuration failed because libxml-2.0 was not found"},{"location":"faq/#ssl-connection-error-when-running-rapids","text":"Problem You are getting the following error message when running RAPIDS: Error: Failed to connect: SSL connection error: error:1425F102:SSL routines:ssl_choose_client_version:unsupported protocol. Solution This is a bug in Ubuntu 20.04 when trying to connect to an old MySQL server with MySQL client 8.0. You should get the same error message if you try to connect from the command line. There you can add the option --ssl-mode=DISABLED but we can't do this from the R connector. If you can't update your server, the quickest solution would be to import your database to another server or to a local environment. Alternatively, you could replace mysql-client and libmysqlclient-dev with mariadb-client and libmariadbclient-dev and reinstall renv. More info about this issue here","title":"SSL connection error when running RAPIDS"},{"location":"faq/#db_tables-key-not-found","text":"Problem If you get the following error KeyError in line 43 of preprocessing.smk: 'PHONE_SENSORS' , it means that the indentation of the key [PHONE_SENSORS] is not matching the other child elements of PHONE_VALID_SENSED_BINS Solution You need to add or remove any leading whitespaces as needed on that line. PHONE_VALID_SENSED_BINS : COMPUTE : False # This flag is automatically ignored (set to True) if you are extracting PHONE_VALID_SENSED_DAYS or screen or Barnett's location features BIN_SIZE : &bin_size 5 # (in minutes) PHONE_SENSORS : []","title":"DB_TABLES key not found"},{"location":"faq/#error-while-updating-your-conda-environment-in-ubuntu","text":"Problem You get the following error: CondaMultiError: CondaVerificationError: The package for tk located at /home/ubuntu/miniconda2/pkgs/tk-8.6.9-hed695b0_1003 appears to be corrupted. The path 'include/mysqlStubs.h' specified in the package manifest cannot be found. ClobberError: This transaction has incompatible packages due to a shared path. packages: conda-forge/linux-64::llvm-openmp-10.0.0-hc9558a2_0, anaconda/linux-64::intel-openmp-2019.4-243 path: 'lib/libiomp5.so' Solution Reinstall conda","title":"Error while updating your conda environment in Ubuntu"},{"location":"faq/#embedded-nul-in-string","text":"Problem You get the following error when downloading sensor data: Error in result_fetch ( res@ptr, n = n ) : embedded nul in string: Solution This problem is due to the way RMariaDB handles a mismatch between data types in R and MySQL (see this issue ). Since it seems this problem won\u2019t be handled by RMariaDB , you have two options: If it\u2019s only a few rows that are causing this problem, remove the the null character from the conflictive table cell. 
If it\u2019s not feasible to modify your data you can try swapping RMariaDB with RMySQL . Just keep in mind you might have problems connecting to modern MySQL servers running on Linux: Add RMySQL to the renv environment by running the following command in a terminal opened in the RAPIDS root folder R -e 'renv::install(\"RMySQL\")' Go to src/data/download_phone_data.R and replace library(RMariaDB) with library(RMySQL) In the same file replace dbEngine <- dbConnect(MariaDB(), default.file = \"./.env\", group = group) with dbEngine <- dbConnect(MySQL(), default.file = \"./.env\", group = group)","title":"Embedded nul in string"},{"location":"file-structure/","text":"File Structure \u00b6 Tip Read this page if you want to learn more about how RAPIDS is structured. If you want to start using it, go to Installation , then to Configuration , and then to Execution All paths mentioned in this page are relative to RAPIDS\u2019 root folder. If you want to extract the behavioral features that RAPIDS offers, you will only have to create or modify the .env file , participant files , time segment files , and the config.yaml file as instructed in the Configuration page . The config.yaml file is the heart of RAPIDS and includes parameters to manage participants, data sources, sensor data, visualizations and more. All data is saved in data/ .
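As a quick orientation, the short listing below sketches that data/ folder using the subfolder roles described in the next sentences; the participant folder (p01) and the file name in the example are hypothetical, not files RAPIDS creates by default:
ls data/
# external/  -> files you import or create yourself (for example, participant files)
# raw/       -> sensor data as downloaded from your database, e.g. data/raw/p01/calls_raw.csv
# interim/   -> intermediate files needed to compute behavioral features
# processed/ -> final behavioral feature files, organized per participant and sensor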
The data/external/ folder stores any data imported or created by the user, data/raw/ stores sensor data as imported from your database, data/interim/ has intermediate files necessary to compute behavioral features from raw data, and data/processed/ has all the final files with the behavioral features in folders per participant and sensor. RAPIDS source code is saved in src/ . The src/data/ folder stores scripts to download, clean and pre-process sensor data, src/features has scripts to extract behavioral features organized in their respective sensor subfolders , src/models/ can host any script to create models or statistical analyses with the behavioral features you extract, and src/visualization/ has scripts to create plots of the raw and processed data. There are other files and folders but only relevant if you are interested in extending RAPIDS (e.g. virtual env files, docs, tests, Dockerfile, the Snakefile, etc.). In the figure below, we represent the interactions between users and files. After a user modifies the configuration files mentioned above, the Snakefile file will search for and execute the Snakemake rules that contain the Python or R scripts necessary to generate or update the required output files (behavioral features, plots, etc.). Interaction diagram between the user, and important files in RAPIDS","title":"File Structure"},{"location":"migrating-from-old-versions/","text":"Migrating from RAPIDS beta \u00b6 If you were relying on the old docs and the most recent version of RAPIDS you are working with is from or before Oct 13, 2020 you are using the beta version of RAPIDS. You can start using the new RAPIDS (we are starting with v0.1.0 ) right away, just take into account the following: Install a new copy of RAPIDS (the R and Python virtual environments didn\u2019t change so the cached versions will be reused) Make sure you don\u2019t skip a new Installation step to give execution permissions to the RAPIDS script: chmod +x rapids Follow the new Configuration guide. You can copy and paste your old .env file You can migrate your old participant files: python tools/update_format_participant_files.py Get familiar with the new way of Executing RAPIDS You can proceed to reconfigure your config.yaml , its structure is more consistent and should be familiar to you. Info If you have any questions reach out to us on Slack .","title":"Migrating from beta"},{"location":"migrating-from-old-versions/#migrating-from-rapids-beta","text":"If you were relying on the old docs and the most recent version of RAPIDS you are working with is from or before Oct 13, 2020 you are using the beta version of RAPIDS. You can start using the new RAPIDS (we are starting with v0.1.0 ) right away, just take into account the following: Install a new copy of RAPIDS (the R and Python virtual environments didn\u2019t change so the cached versions will be reused) Make sure you don\u2019t skip a new Installation step to give execution permissions to the RAPIDS script: chmod +x rapids Follow the new Configuration guide. You can copy and paste your old .env file You can migrate your old participant files: python tools/update_format_participant_files.py Get familiar with the new way of Executing RAPIDS You can proceed to reconfigure your config.yaml , its structure is more consistent and should be familiar to you. 
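For anyone who wants a copy-paste starting point, here is a minimal, hedged sketch of the migration steps above; the folder locations (~/rapids for the new copy, ~/rapids-beta for your old beta copy) are assumptions you should replace with your own paths:
cd ~/rapids                                        # the new copy of RAPIDS you just installed
chmod +x rapids                                    # the new Installation step: give execution permissions to the RAPIDS script
cp ~/rapids-beta/.env .env                         # reuse the .env credentials file from your beta copy
python tools/update_format_participant_files.py    # migrate your old participant files
# then follow the new Configuration guide to rebuild your config.yaml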
Info If you have any questions reach out to us on Slack .","title":"Migrating from RAPIDS beta"},{"location":"team/","text":"RAPIDS Team \u00b6 If you are interested in contributing feel free to submit a pull request or contact us. Core Team \u00b6 Julio Vega (Designer and Lead Developer) \u00b6 About Julio Vega is a postdoctoral associate at the Mobile Sensing + Health Institute. He is interested in personalized methodologies to monitor chronic conditions that affect daily human behavior using mobile and wearable data. vegaju at upmc . edu Personal Website Meng Li \u00b6 About Meng Li received her Master of Science degree in Information Science from the University of Pittsburgh. She is interested in applying machine learning algorithms to the medical field. lim11 at upmc . edu Linkedin Profile Github Profile Abhineeth Reddy Kunta \u00b6 About Abhineeth Reddy Kunta is a Senior Software Engineer with the Mobile Sensing + Health Institute. He is experienced in software development and specializes in building solutions using machine learning. Abhineeth likes exploring ways to leverage technology in advancing medicine and education. Previously he worked as a Computer Programmer at Georgia Department of Public Health. He has a master\u2019s degree in Computer Science from George Mason University. Kwesi Aguillera \u00b6 About Kwesi Aguillera is currently in his first year at the University of Pittsburgh pursuing a Master of Sciences in Information Science specializing in Big Data Analytics. He received his Bachelor of Science degree in Computer Science and Management from the University of the West Indies. Kwesi considers himself a full stack developer and looks forward to applying this knowledge to big data analysis. Linkedin Profile Echhit Joshi \u00b6 About Echhit Joshi is a Masters student at the School of Computing and Information at University of Pittsburgh. His areas of interest are Machine/Deep Learning, Data Mining, and Analytics. Linkedin Profile Nicolas Leo \u00b6 About Nicolas is a rising senior studying computer science at the University of Pittsburgh. His academic interests include databases, machine learning, and application development. After completing his undergraduate degree, he plans to attend graduate school for a MS in Computer Science with a focus on Intelligent Systems. Nikunj Goel \u00b6 About Nik is a graduate student at the University of Pittsburgh pursuing Master of Science in Information Science. He earned his Bachelor of Technology degree in Information Technology from India. He is a Data Enthusiasts and passionate about finding the meaning out of raw data. In a long term, his goal is to create a breakthrough in Data Science and Deep Learning. Linkedin Profile Community Contributors \u00b6 Agam Kumar \u00b6 About Agam is a junior at Carnegie Mellon University studying Statistics and Machine Learning and pursuing an additional major in Computer Science. He is a member of the Data Science team in the Health and Human Performance Lab at CMU and has keen interests in software development and data science. His research interests include ML applications in medicine. Linkedin Profile Github Profile Yasaman S. 
Sefidgar \u00b6 About Linkedin Profile Advisors \u00b6 Afsaneh Doryab \u00b6 About Personal Website Carissa Low \u00b6 About Profile","title":"Team"},{"location":"team/#rapids-team","text":"If you are interested in contributing feel free to submit a pull request or contact us.","title":"RAPIDS Team"},{"location":"team/#core-team","text":"","title":"Core Team"},{"location":"team/#julio-vega-designer-and-lead-developer","text":"About Julio Vega is a postdoctoral associate at the Mobile Sensing + Health Institute. He is interested in personalized methodologies to monitor chronic conditions that affect daily human behavior using mobile and wearable data. vegaju at upmc . edu Personal Website","title":"Julio Vega (Designer and Lead Developer)"},{"location":"team/#meng-li","text":"About Meng Li received her Master of Science degree in Information Science from the University of Pittsburgh. She is interested in applying machine learning algorithms to the medical field. lim11 at upmc . edu Linkedin Profile Github Profile","title":"Meng Li"},{"location":"team/#abhineeth-reddy-kunta","text":"About Abhineeth Reddy Kunta is a Senior Software Engineer with the Mobile Sensing + Health Institute. He is experienced in software development and specializes in building solutions using machine learning. Abhineeth likes exploring ways to leverage technology in advancing medicine and education. Previously he worked as a Computer Programmer at Georgia Department of Public Health. He has a master\u2019s degree in Computer Science from George Mason University.","title":"Abhineeth Reddy Kunta"},{"location":"team/#kwesi-aguillera","text":"About Kwesi Aguillera is currently in his first year at the University of Pittsburgh pursuing a Master of Sciences in Information Science specializing in Big Data Analytics. He received his Bachelor of Science degree in Computer Science and Management from the University of the West Indies. Kwesi considers himself a full stack developer and looks forward to applying this knowledge to big data analysis. Linkedin Profile","title":"Kwesi Aguillera"},{"location":"team/#echhit-joshi","text":"About Echhit Joshi is a Masters student at the School of Computing and Information at University of Pittsburgh. His areas of interest are Machine/Deep Learning, Data Mining, and Analytics. Linkedin Profile","title":"Echhit Joshi"},{"location":"team/#nicolas-leo","text":"About Nicolas is a rising senior studying computer science at the University of Pittsburgh. His academic interests include databases, machine learning, and application development. After completing his undergraduate degree, he plans to attend graduate school for a MS in Computer Science with a focus on Intelligent Systems.","title":"Nicolas Leo"},{"location":"team/#nikunj-goel","text":"About Nik is a graduate student at the University of Pittsburgh pursuing Master of Science in Information Science. He earned his Bachelor of Technology degree in Information Technology from India. He is a Data Enthusiasts and passionate about finding the meaning out of raw data. In a long term, his goal is to create a breakthrough in Data Science and Deep Learning. Linkedin Profile","title":"Nikunj Goel"},{"location":"team/#community-contributors","text":"","title":"Community Contributors"},{"location":"team/#agam-kumar","text":"About Agam is a junior at Carnegie Mellon University studying Statistics and Machine Learning and pursuing an additional major in Computer Science. 
He is a member of the Data Science team in the Health and Human Performance Lab at CMU and has keen interests in software development and data science. His research interests include ML applications in medicine. Linkedin Profile Github Profile","title":"Agam Kumar"},{"location":"team/#yasaman-s-sefidgar","text":"About Linkedin Profile","title":"Yasaman S. Sefidgar"},{"location":"team/#advisors","text":"","title":"Advisors"},{"location":"team/#afsaneh-doryab","text":"About Personal Website","title":"Afsaneh Doryab"},{"location":"team/#carissa-low","text":"About Profile","title":"Carissa Low"},{"location":"developers/documentation/","text":"Documentation \u00b6 We use mkdocs with the material theme to write these docs. Whenever you make any changes, just push them back to the repo and the documentation will be deployed automatically. Set up development environment \u00b6 Make sure your conda environment is active pip install mkdocs pip install mkdocs-material Preview \u00b6 Run the following command in RAPIDS root folder and go to http://127.0.0.1:8000 : mkdocs serve File Structure \u00b6 The documentation config file is /mkdocs.yml , if you are adding new .md files to the docs modify the nav attribute at the bottom of that file. You can use the hierarchy there to find all the files that appear in the documentation. Reference \u00b6 Check this page to get familiar with the different visual elements we can use in the docs (admonitions, code blocks, tables, etc.) You can also refer to /docs/setup/installation.md and /docs/setup/configuration.md to see practical examples of these elements. Hint Any links to internal pages should be relative to the current page. For example, any link from this page (documentation) which is inside ./developers should begin with ../ to go one folder level up like: [ mylink ]( ../setup/installation.md ) Extras \u00b6 You can insert emojis using this syntax :[SOURCE]-[ICON_NAME] from the following sources: https://materialdesignicons.com/ https://fontawesome.com/icons/tasks?style=solid https://primer.style/octicons/ You can use this page to create markdown tables more easily","title":"Documentation"},{"location":"developers/documentation/#documentation","text":"We use mkdocs with the material theme to write these docs. Whenever you make any changes, just push them back to the repo and the documentation will be deployed automatically.","title":"Documentation"},{"location":"developers/documentation/#set-up-development-environment","text":"Make sure your conda environment is active pip install mkdocs pip install mkdocs-material","title":"Set up development environment"},{"location":"developers/documentation/#preview","text":"Run the following command in RAPIDS root folder and go to http://127.0.0.1:8000 : mkdocs serve","title":"Preview"},{"location":"developers/documentation/#file-structure","text":"The documentation config file is /mkdocs.yml , if you are adding new .md files to the docs modify the nav attribute at the bottom of that file. You can use the hierarchy there to find all the files that appear in the documentation.","title":"File Structure"},{"location":"developers/documentation/#reference","text":"Check this page to get familiar with the different visual elements we can use in the docs (admonitions, code blocks, tables, etc.) You can also refer to /docs/setup/installation.md and /docs/setup/configuration.md to see practical examples of these elements. Hint Any links to internal pages should be relative to the current page. 
For example, any link from this page (documentation) which is inside ./developers should begin with ../ to go one folder level up like: [ mylink ]( ../setup/installation.md )","title":"Reference"},{"location":"developers/documentation/#extras","text":"You can insert emojis using this syntax :[SOURCE]-[ICON_NAME] from the following sources: https://materialdesignicons.com/ https://fontawesome.com/icons/tasks?style=solid https://primer.style/octicons/ You can use this page to create markdown tables more easily","title":"Extras"},{"location":"developers/git-flow/","text":"Git Flow \u00b6 We use the develop/master variation of the OneFlow git flow Add New Features \u00b6 We use feature (topic) branches to implement new features Pull the latest develop git checkout develop git pull Create your feature branch git checkout -b feature/feature1 Add, modify or delete the necessary files to add your new feature Update the change log ( docs/change-log.md ) Stage and commit your changes using VS Code git GUI or the following commands git add modified-file1 modified-file2 git commit -m \"Add my new feature\" # use a concise description Integrate your new feature to develop Internal Developer You are an internal developer if you have writing permissions to the repository. Most feature branches are never pushed to the repo, only do so if you expect that its development will take days (to avoid losing your work if you computer is damaged). Otherwise follow the following instructions to locally rebase your feature branch into develop and push those rebased changes online. git checkout feature/feature1 git pull origin develop git rebase -i develop git checkout develop git merge --no-ff feature/feature1 # (use the default merge message) git push origin develop git branch -d feature/feature1 External Developer You are an external developer if you do NOT have writing permissions to the repository. 
Push your feature branch online git push --set-upstream origin feature/external-test Then open a pull request to the develop branch using Github\u2019s GUI Release a New Version \u00b6 Pull the latest develop git checkout develop git pull Create a new release branch git describe --abbrev = 0 --tags # Bump the release (0.1.0 to 0.2.0 => NEW_RELEASE) git checkout -b release/v [ NEW_RELEASE ] develop Add new tag git tag v [ NEW_RELEASE ] Merge and push the release branch git checkout develop git merge release/v [ NEW_RELEASE ] git push --tags origin develop git branch -d release/v [ NEW_RELEASE ] Fast-forward master git checkout master git merge --ff-only develop git push Go to GitHub and create a new release based on the newest tag v[NEW_RELEASE] (remember to add the change log) Release a Hotfix \u00b6 Pull the latest master git checkout master git pull Start a hotfix branch git describe --abbrev = 0 --tags # Bump the hotfix (0.1.0 to 0.1.1 => NEW_HOTFIX) git checkout -b hotfix/v [ NEW_HOTFIX ] master Fix whatever needs to be fixed Update the change log Tag and merge the hotfix git tag v [ NEW_HOTFIX ] git checkout develop git merge hotfix/v [ NEW_HOTFIX ] git push --tags origin develop git branch -d hotfix/v [ NEW_HOTFIX ] Fast-forward master git checkout master git merge --ff-only v[NEW_HOTFIX] git push Go to GitHub and create a new release based on the newest tag v[NEW_HOTFIX] (remember to add the change log)","title":"Git Flow"},{"location":"developers/git-flow/#git-flow","text":"We use the develop/master variation of the OneFlow git flow","title":"Git Flow"},{"location":"developers/git-flow/#add-new-features","text":"We use feature (topic) branches to implement new features Pull the latest develop git checkout develop git pull Create your feature branch git checkout -b feature/feature1 Add, modify or delete the necessary files to add your new feature Update the change log ( docs/change-log.md ) Stage and commit your changes using VS Code git GUI or the following commands git add modified-file1 modified-file2 git commit -m \"Add my new feature\" # use a concise description Integrate your new feature into develop Internal Developer You are an internal developer if you have writing permissions to the repository. Most feature branches are never pushed to the repo, only do so if you expect that its development will take days (to avoid losing your work if your computer is damaged). Otherwise, follow these instructions to locally rebase your feature branch into develop and push those rebased changes online. git checkout feature/feature1 git pull origin develop git rebase -i develop git checkout develop git merge --no-ff feature/feature1 # (use the default merge message) git push origin develop git branch -d feature/feature1 External Developer You are an external developer if you do NOT have writing permissions to the repository.
Push your feature branch online git push --set-upstream origin feature/external-test Then open a pull request to the develop branch using Github\u2019s GUI","title":"Add New Features"},{"location":"developers/git-flow/#release-a-new-version","text":"Pull the latest develop git checkout develop git pull Create a new release branch git describe --abbrev = 0 --tags # Bump the release (0.1.0 to 0.2.0 => NEW_RELEASE) git checkout -b release/v [ NEW_RELEASE ] develop Add new tag git tag v [ NEW_RELEASE ] Merge and push the release branch git checkout develop git merge release/v [ NEW_RELEASE ] git push --tags origin develop git branch -d release/v [ NEW_RELEASE ] Fast-forward master git checkout master git merge --ff-only develop git push Go to GitHub and create a new release based on the newest tag v[NEW_RELEASE] (remember to add the change log)","title":"Release a New Version"},{"location":"developers/git-flow/#release-a-hotfix","text":"Pull the latest master git checkout master git pull Start a hotfix branch git describe --abbrev = 0 --tags # Bump the hotfix (0.1.0 to 0.1.1 => NEW_HOTFIX) git checkout -b hotfix/v [ NEW_HOTFIX ] master Fix whatever needs to be fixed Update the change log Tag and merge the hotfix git tag v [ NEW_HOTFIX ] git checkout develop git merge hotfix/v [ NEW_HOTFIX ] git push --tags origin develop git branch -d hotfix/v [ NEW_HOTFIX ] Fast-forward master git checkout master git merge --ff-only v[NEW_HOTFIX] git push Go to GitHub and create a new release based on the newest tag v[NEW_HOTFIX] (remember to add the change log)","title":"Release a Hotfix"},{"location":"developers/remote-support/","text":"Remote Support \u00b6 We use the Live Share extension of Visual Studio Code to debug issues when sharing data or database credentials is not possible. Install Visual Studio Code Open your RAPIDS root folder in a new VSCode window Open a new Terminal Terminal > New terminal Install the Live Share extension pack Press Ctrl + P or Cmd + P and run this command: >live share: start collaboration session Follow the instructions and share the session link you receive","title":"Remote Support"},{"location":"developers/remote-support/#remote-support","text":"We use the Live Share extension of Visual Studio Code to debug issues when sharing data or database credentials is not possible. Install Visual Studio Code Open your RAPIDS root folder in a new VSCode window Open a new Terminal Terminal > New terminal Install the Live Share extension pack Press Ctrl + P or Cmd + P and run this command: >live share: start collaboration session Follow the instructions and share the session link you receive","title":"Remote Support"},{"location":"developers/test-cases/","text":"Test Cases \u00b6 Along with the continued development and the addition of new sensors and features to the RAPIDS pipeline, tests for the currently available sensors and features are being implemented. Since this is a work in progress, this page will be updated with the list of sensors and features for which testing is available. For each of the sensors listed, a description of the data used for testing (test cases) is outlined. Currently, for all intents and purposes, tests/data/raw/test01/ contains all the test data files for testing android data formats and tests/data/raw/test02/ contains all the test data files for testing iOS data formats. It follows that the expected (verified) output is contained in tests/data/processed/test01/ and tests/data/processed/test02/ for Android and iOS respectively.
tests/data/raw/test03/ and tests/data/raw/test04/ contain data files for testing empty raw data files for android and iOS respectively. The following is a list of the sensors for which testing is currently available. Messages (SMS) \u00b6 The raw message data file contains data for 2 separate days. The data for the first day contains 5 records for every epoch . The second day's data contains 6 records for each of only 2 epochs (currently morning and evening ) The raw message data contains records for both message_types (i.e. received and sent ) in both days in all epochs. The number of records with each message_types per epoch is randomly distributed. There is at least one record with each message_types per epoch. There is one raw message data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files Calls \u00b6 Due to the difference in the format of the raw call data for iOS and Android, the following are the expected results for calls_with_datetime_unified.csv . This would give a better idea of the use cases being tested since the calls_with_datetime_unified.csv would make both the iOS and Android data comparable. The call data would contain data for 2 days. The data for the first day contains 6 records for every epoch . The second day's data contains 6 records for each of only 2 epochs (currently morning and evening ) The call data contains records for all call_types (i.e. incoming , outgoing and missed ) in both days in all epochs. The number of records with each of the call_types per epoch is randomly distributed. There is at least one record with each of the call_types per epoch. There is one call data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files Screen \u00b6 Due to the difference in the format of the raw screen data for iOS and Android, the following are the expected results for screen_deltas.csv . This would give a better idea of the use cases being tested since the screen_deltas.csv would make both the iOS and Android data comparable. These files are used to calculate the features for the screen sensor. The screen delta data file contains data for 1 day. The screen delta data contains 1 record to represent an unlock episode that falls within an epoch for every epoch . The screen delta data contains 1 record to represent an unlock episode that falls across the boundary of 2 epochs. Namely the unlock episode starts in one epoch and ends in the next, thus there is a record for unlock episodes that fall across night to morning , morning to afternoon and finally afternoon to night . The testing is done for the unlock episode_type. There is one screen data file each for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files Battery \u00b6 Due to the difference in the format of the raw battery data for iOS and Android as well as versions of iOS, the following are the expected results for battery_deltas.csv . This would give a better idea of the use cases being tested since the battery_deltas.csv would make both the iOS and Android data comparable. These files are used to calculate the features for the battery sensor. The battery delta data file contains data for 1 day. The battery delta data contains 1 record each for a charging and discharging episode that falls within an epoch for every epoch .
Thus, for the daily epoch there would be multiple charging and discharging episodes. Since either a charging episode or a discharging episode and not both can occur across epochs, in order to test episodes that occur across epochs alternating episodes of charging and discharging episodes that fall across night to morning , morning to afternoon and finally afternoon to night are present in the battery delta data. This starts with a discharging episode that begins in night and ends in morning . There is one battery data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files Bluetooth \u00b6 The raw Bluetooth data file contains data for 1 day. The raw Bluetooth data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) An option of 5 Bluetooth devices is randomly distributed throughout the data records. There is one raw Bluetooth data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files. WIFI \u00b6 There are 2 data files ( wifi_raw.csv and sensor_wifi_raw.csv ) for each fake participant for each phone platform. The raw WIFI data files contain data for 1 day. The sensor_wifi_raw.csv data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) The wifi_raw.csv data contains 3 records with random timestamps for each epoch to represent visible broadcasting WIFI networks. This file is empty for the iOS phone testing data. An option of 10 access point devices is randomly distributed throughout the data records. 5 each for sensor_wifi_raw.csv and wifi_raw.csv . There are data files for testing both iOS and Android data formats. There are also additional empty data files for both android and iOS for testing empty data files. Light \u00b6 The raw light data file contains data for 1 day. The raw light data contains 3 or 4 rows of data for each epoch except night . The single row of data for night is for testing features for single value inputs. (For example, testing the standard deviation of one input value) Since light is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files. Application Foreground \u00b6 The raw application foreground data file contains data for 1 day. The raw application foreground data contains 7 - 9 rows of data for each epoch . The records for each epoch contain apps that are randomly selected from a list of apps that are from the MULTIPLE_CATEGORIES and SINGLE_CATEGORIES (See testing_config.yaml ). There are also records in each epoch that have apps randomly selected from a list of apps that are from the EXCLUDED_CATEGORIES and EXCLUDED_APPS . This is to test that these apps are actually being excluded from the calculations of features. There are also records to test SINGLE_APPS calculations.
Since application foreground is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files. Activity Recognition \u00b6 The raw Activity Recognition data file contains data for 1 day. For each epoch period, the raw Activity Recognition data contains rows that record 2 - 5 different activity_types . This is so that durations of activities can be tested. Additionally, there are records that mimic the duration of an activity over the time boundary of neighboring epochs. (For example, there is a set of records that mimic the participant being in_vehicle from afternoon into evening ) There is one file each with raw Activity Recognition data for testing both iOS and Android data formats. (plugin_google_activity_recognition_raw.csv for android and plugin_ios_activity_recognition_raw.csv for iOS) There is also an additional empty data file for both android and iOS for testing empty data files. Conversation \u00b6 The raw conversation data file contains data for 2 days. The raw conversation data contains records with a sample of both datatypes (i.e. voice/noise = 0 , and conversation = 2 ) as well as rows with samples of each of the inference values (i.e. silence = 0 , noise = 1 , voice = 2 , and unknown = 3 ) for each epoch . The different datatype and inference records are randomly distributed throughout the epoch . Additionally there are 2 - 5 records for conversations ( datatype = 2, and inference = -1) in each epoch and for each epoch except night, there is a conversation record that has a double_convo_start timestamp that is from the previous epoch . This is to test the calculations of features across epochs . There is a raw conversation data file for both android and iOS platforms ( plugin_studentlife_audio_android_raw.csv and plugin_studentlife_audio_raw.csv respectively). Finally, there are also additional empty data files for both android and iOS for testing empty data files","title":"Test cases"},{"location":"developers/test-cases/#test-cases","text":"Along with the continued development and the addition of new sensors and features to the RAPIDS pipeline, tests for the currently available sensors and features are being implemented. Since this is a Work In Progress, this page will be updated with the list of sensors and features for which testing is available. For each of the sensors listed, a description of the data used for testing (test cases) is outlined. Currently, for all intents and testing purposes, tests/data/raw/test01/ contains all the test data files for testing android data formats and tests/data/raw/test02/ contains all the test data files for testing iOS data formats. It follows that the expected (verified) output is contained in tests/data/processed/test01/ and tests/data/processed/test02/ for Android and iOS respectively. tests/data/raw/test03/ and tests/data/raw/test04/ contain data files for testing empty raw data files for android and iOS respectively. The following is a list of the sensors for which testing is currently available.","title":"Test Cases"},{"location":"developers/test-cases/#messages-sms","text":"The raw message data file contains data for 2 separate days. The data for the first day contains 5 records for every epoch . The second day's data contains 6 records for each of only 2 epochs (currently morning and evening ) The raw message data contains records for both message_types (i.e. received and sent ) in both days in all epochs. 
The number of records with each message_types per epoch is randomly distributed There is at least one record with each message_types per epoch. There is one raw message data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Messages (SMS)"},{"location":"developers/test-cases/#calls","text":"Due to the difference in the format of the raw call data for iOS and Android, the following describes the expected results in calls_with_datetime_unified.csv . This gives a better idea of the use cases being tested since calls_with_datetime_unified.csv makes both the iOS and Android data comparable. The call data contains data for 2 days. The data for the first day contains 6 records for every epoch . The second day's data contains 6 records for each of only 2 epochs (currently morning and evening ) The call data contains records for all call_types (i.e. incoming , outgoing and missed ) in both days in all epochs. The number of records with each of the call_types per epoch is randomly distributed. There is at least one record with each call_types per epoch. There is one call data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Calls"},{"location":"developers/test-cases/#screen","text":"Due to the difference in the format of the raw screen data for iOS and Android, the following describes the expected results in screen_deltas.csv . This gives a better idea of the use cases being tested since screen_deltas.csv makes both the iOS and Android data comparable. These files are used to calculate the features for the screen sensor. The screen delta data file contains data for 1 day. The screen delta data contains 1 record to represent an unlock episode that falls within an epoch for every epoch . The screen delta data contains 1 record to represent an unlock episode that falls across the boundary of 2 epochs. Namely, the unlock episode starts in one epoch and ends in the next, thus there is a record for unlock episodes that fall across night to morning , morning to afternoon and finally afternoon to night . The testing is done for the unlock episode_type. There is one screen data file each for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Screen"},{"location":"developers/test-cases/#battery","text":"Due to the difference in the format of the raw battery data for iOS and Android, as well as across versions of iOS, the following describes the expected results in battery_deltas.csv . This gives a better idea of the use cases being tested since battery_deltas.csv makes both the iOS and Android data comparable. These files are used to calculate the features for the battery sensor. The battery delta data file contains data for 1 day. The battery delta data contains 1 record each for a charging and discharging episode that falls within an epoch for every epoch . 
Thus, for the daily epoch there would be multiple charging and discharging episodes. Since either a charging episode or a discharging episode (but not both) can occur across epochs, alternating charging and discharging episodes that fall across night to morning , morning to afternoon and finally afternoon to night are present in the battery delta data in order to test episodes that occur across epochs. This sequence starts with a discharging episode that begins in night and ends in morning . There is one battery data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Battery"},{"location":"developers/test-cases/#bluetooth","text":"The raw Bluetooth data file contains data for 1 day. The raw Bluetooth data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) A set of 5 Bluetooth devices is randomly distributed throughout the data records. There is one raw Bluetooth data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files.","title":"Bluetooth"},{"location":"developers/test-cases/#wifi","text":"There are 2 data files ( wifi_raw.csv and sensor_wifi_raw.csv ) for each fake participant for each phone platform. The raw WIFI data files contain data for 1 day. The sensor_wifi_raw.csv data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) The wifi_raw.csv data contains 3 records with random timestamps for each epoch to represent visible broadcasting WIFI networks. This file is empty for the iOS phone testing data. A set of 10 access point devices is randomly distributed throughout the data records, 5 each for sensor_wifi_raw.csv and wifi_raw.csv . There are data files for testing both iOS and Android data formats. There are also additional empty data files for both android and iOS for testing empty data files.","title":"WIFI"},{"location":"developers/test-cases/#light","text":"The raw light data file contains data for 1 day. The raw light data contains 3 or 4 rows of data for each epoch except night . The single row of data for night is for testing features with single-value inputs. (For example, testing the standard deviation of one input value) Since light is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files.","title":"Light"},{"location":"developers/test-cases/#application-foreground","text":"The raw application foreground data file contains data for 1 day. The raw application foreground data contains 7 - 9 rows of data for each epoch . The records for each epoch contain apps that are randomly selected from a list of apps that are from the MULTIPLE_CATEGORIES and SINGLE_CATEGORIES (See testing_config.yaml ). 
There are also records in each epoch that have apps randomly selected from a list of apps that are from the EXCLUDED_CATEGORIES and EXCLUDED_APPS . This is to test that these apps are actually being excluded from the calculations of features. There are also records to test SINGLE_APPS calculations. Since application foreground is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files.","title":"Application Foreground"},{"location":"developers/test-cases/#activity-recognition","text":"The raw Activity Recognition data file contains data for 1 day. For each epoch period, the raw Activity Recognition data contains rows that record 2 - 5 different activity_types . This is so that durations of activities can be tested. Additionally, there are records that mimic the duration of an activity over the time boundary of neighboring epochs. (For example, there is a set of records that mimic the participant being in_vehicle from afternoon into evening ) There is one file each with raw Activity Recognition data for testing both iOS and Android data formats. (plugin_google_activity_recognition_raw.csv for android and plugin_ios_activity_recognition_raw.csv for iOS) There is also an additional empty data file for both android and iOS for testing empty data files.","title":"Activity Recognition"},{"location":"developers/test-cases/#conversation","text":"The raw conversation data file contains data for 2 days. The raw conversation data contains records with a sample of both datatypes (i.e. voice/noise = 0 , and conversation = 2 ) as well as rows with samples of each of the inference values (i.e. silence = 0 , noise = 1 , voice = 2 , and unknown = 3 ) for each epoch . The different datatype and inference records are randomly distributed throughout the epoch . Additionally there are 2 - 5 records for conversations ( datatype = 2, and inference = -1) in each epoch and for each epoch except night, there is a conversation record that has a double_convo_start timestamp that is from the previous epoch . This is to test the calculations of features across epochs . There is a raw conversation data file for both android and iOS platforms ( plugin_studentlife_audio_android_raw.csv and plugin_studentlife_audio_raw.csv respectively). Finally, there are also additional empty data files for both android and iOS for testing empty data files","title":"Conversation"},{"location":"developers/testing/","text":"Testing \u00b6 The following is a simple guide to testing RAPIDS. All files necessary for testing are stored in the /tests directory. Steps for Testing \u00b6 To begin testing RAPIDS, place the fake raw input data csv files in tests/data/raw/ . The fake participant files should be placed in tests/data/external/ . The expected output files of RAPIDS after processing the input data should be placed in tests/data/processed/ . The Snakemake rule(s) that are to be tested must be placed in the tests/Snakemake file. The current tests/Snakemake is a good example of how to define them. (At the time of writing this documentation, the snakefile contains rules for messages (SMS), calls and screen) Edit the tests/settings/config.yaml . Add and/or remove the rules to be run for testing from the forcerun list. Edit the tests/settings/testing_config.yaml with the necessary configuration settings for running the rules to be tested. Add any additional test scripts in tests/scripts . Uncomment or comment out lines in the testing shell script tests/scripts/run_tests.sh . 
Run the testing shell script. tests/scripts/run_tests.sh The following is a snippet of the output you should see after running your test. test_sensors_files_exist ( test_sensor_features.TestSensorFeatures ) ... ok test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ... FAIL ====================================================================== FAIL: test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ---------------------------------------------------------------------- The results above show that the first test test_sensors_files_exist passed while test_sensors_features_calculations failed. In addition, you should get the traceback of the failure (not shown here). For more information on how to implement test scripts and use unittest , please see the Unittest Documentation . Testing of the RAPIDS sensors and features is a work-in-progress. Please see test-cases for a list of sensors and features that have testing currently available. Currently the repository is set up to test a number of sensors out of the box by simply running the tests/scripts/run_tests.sh command once the RAPIDS python environment is active.","title":"Testing"},{"location":"developers/testing/#testing","text":"The following is a simple guide to testing RAPIDS. All files necessary for testing are stored in the /tests directory.","title":"Testing"},{"location":"developers/testing/#steps-for-testing","text":"To begin testing RAPIDS, place the fake raw input data csv files in tests/data/raw/ . The fake participant files should be placed in tests/data/external/ . The expected output files of RAPIDS after processing the input data should be placed in tests/data/processed/ . The Snakemake rule(s) that are to be tested must be placed in the tests/Snakemake file. The current tests/Snakemake is a good example of how to define them. (At the time of writing this documentation, the snakefile contains rules for messages (SMS), calls and screen) Edit the tests/settings/config.yaml . Add and/or remove the rules to be run for testing from the forcerun list. Edit the tests/settings/testing_config.yaml with the necessary configuration settings for running the rules to be tested. Add any additional test scripts in tests/scripts . Uncomment or comment out lines in the testing shell script tests/scripts/run_tests.sh . Run the testing shell script. tests/scripts/run_tests.sh The following is a snippet of the output you should see after running your test. test_sensors_files_exist ( test_sensor_features.TestSensorFeatures ) ... ok test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ... FAIL ====================================================================== FAIL: test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ---------------------------------------------------------------------- The results above show that the first test test_sensors_files_exist passed while test_sensors_features_calculations failed. In addition, you should get the traceback of the failure (not shown here). For more information on how to implement test scripts and use unittest , please see the Unittest Documentation . Testing of the RAPIDS sensors and features is a work-in-progress. Please see test-cases for a list of sensors and features that have testing currently available. 
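For orientation, below is a minimal, hypothetical sketch of what a test script along the lines of test_sensor_features might look like. The file paths are illustrative assumptions; the actual scripts in tests/scripts build their file lists from the testing configuration files.

```python
import os
import unittest

import pandas as pd
from pandas.testing import assert_frame_equal

# Illustrative paths only (assumptions); the real test scripts derive these from the testing settings.
EXPECTED_FILE = "tests/data/processed/test01/phone_messages.csv"   # verified output
ACTUAL_FILE = "data/processed/features/test01/phone_messages.csv"  # output produced by the test run


class TestSensorFeatures(unittest.TestCase):

    def test_sensors_files_exist(self):
        # The pipeline should have produced the feature file for the fake participant
        self.assertTrue(os.path.exists(ACTUAL_FILE))

    def test_sensors_features_calculations(self):
        # Compare the computed features against the verified, expected output
        expected = pd.read_csv(EXPECTED_FILE)
        actual = pd.read_csv(ACTUAL_FILE)
        assert_frame_equal(expected, actual, check_like=True)  # ignore row/column order


if __name__ == "__main__":
    unittest.main()
```

Comparing the computed output against a verified CSV keeps the test independent of how the features were calculated, which is why the expected files live under tests/data/processed/ .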
Currently the repository is set up to test a number of sensors out of the box by simply running the tests/scripts/run_tests.sh command once the RAPIDS python environment is active.","title":"Steps for Testing"},{"location":"developers/virtual-environments/","text":"Python Virtual Environment \u00b6 Add new packages \u00b6 Try to install any new package using conda install -c CHANNEL PACKAGE_NAME (you can use pip if the package is only available there). Make sure your Python virtual environment is active ( conda activate YOUR_ENV ). Remove packages \u00b6 Uninstall packages using the same manager you used to install them conda remove PACKAGE_NAME or pip uninstall PACKAGE_NAME Update your conda environment.yaml \u00b6 After installing or removing a package you can use the following command in your terminal to update your environment.yaml before publishing your pipeline. Note that we ignore the package version for libfortran to keep compatibility with Linux: conda env export --no-builds | sed 's/^.*libgfortran.*$/ - libgfortran/' > environment.yml R Virtual Environment \u00b6 Add new packages \u00b6 Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::install(\"PACKAGE_NAME\") Remove packages \u00b6 Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::remove(\"PACKAGE_NAME\") Update your R renv.lock \u00b6 After installing or removing a package you can use the following command in your terminal to update your renv.lock before publishing your pipeline. Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::snapshot() (renv will ask you to confirm any updates to this file)","title":"Virtual Environments"},{"location":"developers/virtual-environments/#python-virtual-environment","text":"","title":"Python Virtual Environment"},{"location":"developers/virtual-environments/#add-new-packages","text":"Try to install any new package using conda install -c CHANNEL PACKAGE_NAME (you can use pip if the package is only available there). Make sure your Python virtual environment is active ( conda activate YOUR_ENV ).","title":"Add new packages"},{"location":"developers/virtual-environments/#remove-packages","text":"Uninstall packages using the same manager you used to install them conda remove PACKAGE_NAME or pip uninstall PACKAGE_NAME","title":"Remove packages"},{"location":"developers/virtual-environments/#update-your-conda-environmentyaml","text":"After installing or removing a package you can use the following command in your terminal to update your environment.yaml before publishing your pipeline. 
Note that we ignore the package version for libfortran to keep compatibility with Linux: conda env export --no-builds | sed 's/^.*libgfortran.*$/ - libgfortran/' > environment.yml","title":"Update your conda environment.yaml"},{"location":"developers/virtual-environments/#r-virtual-environment","text":"","title":"R Virtual Environment"},{"location":"developers/virtual-environments/#add-new-packages_1","text":"Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::install(\"PACKAGE_NAME\")","title":"Add new packages"},{"location":"developers/virtual-environments/#remove-packages_1","text":"Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::remove(\"PACKAGE_NAME\")","title":"Remove packages"},{"location":"developers/virtual-environments/#update-your-r-renvlock","text":"After installing or removing a package you can use the following command in your terminal to update your renv.lock before publishing your pipeline. Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::snapshot() (renv will ask you to confirm any updates to this file)","title":"Update your R renv.lock"},{"location":"features/add-new-features/","text":"Add New Features \u00b6 Hint We recommend reading the Behavioral Features Introduction before reading this page Hint You won\u2019t have to deal with time zones, dates, times, data cleaning or preprocessing. The data that RAPIDS pipes to your feature extraction code is ready to process. New Features for Existing Sensors \u00b6 You can add new features to any existing sensors (see list below) by adding a new provider in three steps: Modify the config.yaml file Create a provider folder, script and function Implement your features extraction code As a tutorial, we will add a new provider for PHONE_ACCELEROMETER called VEGA that extracts feature1 , feature2 , feature3 in Python and that it requires a parameter from the user called MY_PARAMETER . Existing Sensors An existing sensor is any of the phone or Fitbit sensors with a configuration entry in config.yaml : Phone Accelerometer Phone Activity Recognition Phone Applications Foreground Phone Battery Phone Bluetooth Phone Calls Phone Conversation Phone Data Yield Phone Light Phone Locations Phone Messages Phone Screen Phone WiFI Connected Phone WiFI Visible Fitbit Heart Rate Summary Fitbit Heart Rate Intraday Fitbit Sleep Summary Fitbit Steps Summary Fitbit Steps Intraday Modify the config.yaml file \u00b6 In this step you need to add your provider configuration section under the relevant sensor in config.yaml . See our example for our tutorial\u2019s VEGA provider for PHONE_ACCELEROMETER : Example configuration for a new accelerometer provider VEGA PHONE_ACCELEROMETER : TABLE : accelerometer PROVIDERS : RAPIDS : COMPUTE : False ... PANDA : COMPUTE : False ... VEGA : COMPUTE : False FEATURES : [ \"feature1\" , \"feature2\" , \"feature3\" ] MY_PARAMTER : a_string SRC_FOLDER : \"vega\" SRC_LANGUAGE : \"python\" Key Description [COMPUTE] Flag to activate/deactivate your provider [FEATURES] List of features your provider supports. Your provider code should only return the features on this list [MY_PARAMTER] An arbitrary parameter that our example provider VEGA needs. This can be a boolean, integer, float, string or an array of any of such types. 
[SRC_LANGUAGE] The programming language of your provider script, it can be python or r , in our example python [SRC_FOLDER] The name of your provider in lower case, in our example vega (this will be the name of your folder in the next step) Create a provider folder, script and function \u00b6 In this step you need to add a folder, script and function for your provider. Create your provider folder under src/feature/DEVICE_SENSOR/YOUR_PROVIDER , in our example src/feature/phone_accelerometer/vega (same as [SRC_FOLDER] in the step above). Create your provider script inside your provider folder; it can be a Python file called main.py or an R file called main.R . Add your provider function in your provider script. The name of this function should be [providername]_features , in our example vega_features Python function def [ providername ] _features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): R function [ providername ] _ features <- function ( sensor_data , time_segment , provider ) Implement your feature extraction code \u00b6 The provider function that you created in the step above will receive the following parameters: Parameter Description sensor_data_files Path to the CSV file containing the data of a single participant. This data has been cleaned and preprocessed. Your function will be automatically called for each participant in your study (in the [PIDS] array in config.yaml ) time_segment The label of the time segment that should be processed. provider The parameters you configured for your provider in config.yaml will be available in this variable as a dictionary in Python or a list in R. In our example this dictionary contains {MY_PARAMETER:\"a_string\"} filter_data_by_segment Python only. A function that you will use to filter your data. In R this function is already available in the environment. *args Python only. Not used for now **kwargs Python only. Not used for now The code to extract your behavioral features should be implemented in your provider function and in general terms it will have three stages: 1. Read a participant\u2019s data by loading the CSV data stored in the file pointed to by sensor_data_files acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) Note that phone\u2019s battery, screen, and activity recognition data is given as episodes instead of event rows (for example, start and end timestamps of the periods the phone screen was on) 2. Filter your data to process only those rows that belong to time_segment This step is only one line of code, but to understand why we need it, keep reading. acc_data = filter_data_by_segment ( acc_data , time_segment ) You should use the filter_data_by_segment() function to process and group those rows that belong to each of the time segments RAPIDS could be configured with . Let\u2019s understand the filter_data_by_segment() function with an example. A RAPIDS user can extract features on any arbitrary time segment . A time segment is a period of time that has a label and one or more instances. For example, the user (or you) could have requested features on a daily, weekly, and week-end basis for p01 . The labels are arbitrary and the instances depend on the days a participant was monitored for: the daily segment could be named my_days and if p01 was monitored for 14 days, it would have 14 instances the weekly segment could be named my_weeks and if p01 was monitored for 14 days, it would have 2 instances. 
the weekend segment could be named my_weekends and if p01 was monitored for 14 days, it would have 2 instances. For this example, RAPIDS will call your provider function three times for p01 , once where time_segment is my_days , once where time_segment is my_weeks and once where time_segment is my_weekends . In this example, not every row in p01 \u2018s data needs to take part in the feature computation for each segment, and the rows need to be grouped differently. Thus filter_data_by_segment() comes in handy: it will return a data frame that contains the rows that were logged during a time segment plus an extra column called local_segment . This new column will have as many unique values as time segment instances exist (14, 2, and 2 for our p01 \u2018s my_days , my_weeks , and my_weekends examples). After filtering, you should group the data frame by this column and compute any desired features , for example: acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () The reason RAPIDS does not filter the participant\u2019s data set for you is because your code might need to compute something based on a participant\u2019s complete dataset before computing their features. For example, you might want to identify the number that called a participant the most throughout the study before computing a feature with the number of calls the participant received from this number. 3. Return a data frame with your features After filtering, grouping your data, and computing your features, your provider function should return a data frame that has: One row per time segment instance (e.g. 14 for our p01 \u2018s my_days example) The local_segment column added by filter_data_by_segment() One column per feature. By convention the name of your features should only contain letters or numbers ( feature1 ). RAPIDS will automatically add the right sensor and provider prefix ( phone_accelerometer_vega_ ) PHONE_ACCELEROMETER Provider Example For your reference, this is a short example of our own provider ( RAPIDS ) for PHONE_ACCELEROMETER that computes five acceleration features def rapids_features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) requested_features = provider [ \"FEATURES\" ] # name of the features this function can compute base_features_names = [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] # the subset of requested features this function can compute features_to_compute = list ( set ( requested_features ) & set ( base_features_names )) acc_features = pd . DataFrame ( columns = [ \"local_segment\" ] + features_to_compute ) if not acc_data . empty : acc_data = filter_data_by_segment ( acc_data , time_segment ) if not acc_data . empty : acc_features = pd . DataFrame () # get magnitude related features: magnitude = sqrt(x^2+y^2+z^2) magnitude = acc_data . apply ( lambda row : np . sqrt ( row [ \"double_values_0\" ] ** 2 + row [ \"double_values_1\" ] ** 2 + row [ \"double_values_2\" ] ** 2 ), axis = 1 ) acc_data = acc_data . assign ( magnitude = magnitude . values ) if \"maxmagnitude\" in features_to_compute : acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () if \"minmagnitude\" in features_to_compute : acc_features [ \"minmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . 
min () if \"avgmagnitude\" in features_to_compute : acc_features [ \"avgmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . mean () if \"medianmagnitude\" in features_to_compute : acc_features [ \"medianmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . median () if \"stdmagnitude\" in features_to_compute : acc_features [ \"stdmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . std () acc_features = acc_features . reset_index () return acc_features New Features for Non-Existing Sensors \u00b6 If you want to add features for a device or a sensor that we do not support at the moment (those that do not appear in the \"Existing Sensors\" list above), contact us or request it on Slack and we can add the necessary code so you can follow the instructions above.","title":"Add New Features"},{"location":"features/add-new-features/#add-new-features","text":"Hint We recommend reading the Behavioral Features Introduction before reading this page Hint You won\u2019t have to deal with time zones, dates, times, data cleaning or preprocessing. The data that RAPIDS pipes to your feature extraction code is ready to process.","title":"Add New Features"},{"location":"features/add-new-features/#new-features-for-existing-sensors","text":"You can add new features to any existing sensors (see list below) by adding a new provider in three steps: Modify the config.yaml file Create a provider folder, script and function Implement your features extraction code As a tutorial, we will add a new provider for PHONE_ACCELEROMETER called VEGA that extracts feature1 , feature2 , feature3 in Python and that it requires a parameter from the user called MY_PARAMETER . Existing Sensors An existing sensor is any of the phone or Fitbit sensors with a configuration entry in config.yaml : Phone Accelerometer Phone Activity Recognition Phone Applications Foreground Phone Battery Phone Bluetooth Phone Calls Phone Conversation Phone Data Yield Phone Light Phone Locations Phone Messages Phone Screen Phone WiFI Connected Phone WiFI Visible Fitbit Heart Rate Summary Fitbit Heart Rate Intraday Fitbit Sleep Summary Fitbit Steps Summary Fitbit Steps Intraday","title":"New Features for Existing Sensors"},{"location":"features/add-new-features/#modify-the-configyaml-file","text":"In this step you need to add your provider configuration section under the relevant sensor in config.yaml . See our example for our tutorial\u2019s VEGA provider for PHONE_ACCELEROMETER : Example configuration for a new accelerometer provider VEGA PHONE_ACCELEROMETER : TABLE : accelerometer PROVIDERS : RAPIDS : COMPUTE : False ... PANDA : COMPUTE : False ... VEGA : COMPUTE : False FEATURES : [ \"feature1\" , \"feature2\" , \"feature3\" ] MY_PARAMTER : a_string SRC_FOLDER : \"vega\" SRC_LANGUAGE : \"python\" Key Description [COMPUTE] Flag to activate/deactivate your provider [FEATURES] List of features your provider supports. Your provider code should only return the features on this list [MY_PARAMTER] An arbitrary parameter that our example provider VEGA needs. This can be a boolean, integer, float, string or an array of any of such types. 
[SRC_LANGUAGE] The programming language of your provider script, it can be python or r , in our example python [SRC_FOLDER] The name of your provider in lower case, in our example vega (this will be the name of your folder in the next step)","title":"Modify the config.yaml file"},{"location":"features/add-new-features/#create-a-provider-folder-script-and-function","text":"In this step you need to add a folder, script and function for your provider. Create your provider folder under src/feature/DEVICE_SENSOR/YOUR_PROVIDER , in our example src/feature/phone_accelerometer/vega (same as [SRC_FOLDER] in the step above). Create your provider script inside your provider folder, it can be a Python file called main.py or an R file called main.R . Add your provider function in your provider script. The name of such function should be [providername]_features , in our example vega_features Python function def [ providername ] _features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): R function [ providername ] _ features <- function ( sensor_data , time_segment , provider )","title":"Create a provider folder, script and function"},{"location":"features/add-new-features/#implement-your-feature-extraction-code","text":"The provider function that you created in the step above will receive the following parameters: Parameter Description sensor_data_files Path to the CSV file containing the data of a single participant. This data has been cleaned and preprocessed. Your function will be automatically called for each participant in your study (in the [PIDS] array in config.yaml ) time_segment The label of the time segment that should be processed. provider The parameters you configured for your provider in config.yaml will be available in this variable as a dictionary in Python or a list in R. In our example this dictionary contains {MY_PARAMETER:\"a_string\"} filter_data_by_segment Python only. A function that you will use to filter your data. In R this function is already available in the environment. *args Python only. Not used for now **kwargs Python only. Not used for now The code to extract your behavioral features should be implemented in your provider function and in general terms it will have three stages: 1. Read a participant\u2019s data by loading the CSV data stored in the file pointed by sensor_data_files acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) Note that phone\u2019s battery, screen, and activity recognition data is given as episodes instead of event rows (for example, start and end timestamps of the periods the phone screen was on) 2. Filter your data to process only those rows that belong to time_segment This step is only one line of code, but to undersand why we need it, keep reading. acc_data = filter_data_by_segment ( acc_data , time_segment ) You should use the filter_data_by_segment() function to process and group those rows that belong to each of the time segments RAPIDS could be configured with . Let\u2019s understand the filter_data_by_segment() function with an example. A RAPIDS user can extract features on any arbitrary time segment . A time segment is a period of time that has a label and one or more instances. For example, the user (or you) could have requested features on a daily, weekly, and week-end basis for p01 . 
The labels are arbritrary and the instances depend on the days a participant was monitored for: the daily segment could be named my_days and if p01 was monitored for 14 days, it would have 14 instances the weekly segment could be named my_weeks and if p01 was monitored for 14 days, it would have 2 instances. the weekend segment could be named my_weekends and if p01 was monitored for 14 days, it would have 2 instances. For this example, RAPIDS will call your provider function three times for p01 , once where time_segment is my_days , once where time_segment is my_weeks and once where time_segment is my_weekends . In this example not every row in p01 \u2018s data needs to take part in the feature computation for either segment and the rows need to be grouped differently. Thus filter_data_by_segment() comes in handy, it will return a data frame that contains the rows that were logged during a time segment plus an extra column called local_segment . This new column will have as many unique values as time segment instances exist (14, 2, and 2 for our p01 \u2018s my_days , my_weeks , and my_weekends examples). After filtering, you should group the data frame by this column and compute any desired features , for example: acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () The reason RAPIDS does not filter the participant\u2019s data set for you is because your code might need to compute something based on a participant\u2019s complete dataset before computing their features. For example, you might want to identify the number that called a participant the most throughout the study before computing a feature with the number of calls the participant received from this number. 3. Return a data frame with your features After filtering, grouping your data, and computing your features, your provider function should return a data frame that has: One row per time segment instance (e.g. 14 our p01 \u2018s my_days example) The local_segment column added by filter_data_by_segment() One column per feature. By convention the name of your features should only contain letters or numbers ( feature1 ). RAPIDS will automatically add the right sensor and provider prefix ( phone_accelerometr_vega_ ) PHONE_ACCELEROMETER Provider Example For your reference, this a short example of our own provider ( RAPIDS ) for PHONE_ACCELEROMETER that computes five acceleration features def rapids_features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) requested_features = provider [ \"FEATURES\" ] # name of the features this function can compute base_features_names = [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] # the subset of requested features this function can compute features_to_compute = list ( set ( requested_features ) & set ( base_features_names )) acc_features = pd . DataFrame ( columns = [ \"local_segment\" ] + features_to_compute ) if not acc_data . empty : acc_data = filter_data_by_segment ( acc_data , time_segment ) if not acc_data . empty : acc_features = pd . DataFrame () # get magnitude related features: magnitude = sqrt(x^2+y^2+z^2) magnitude = acc_data . apply ( lambda row : np . sqrt ( row [ \"double_values_0\" ] ** 2 + row [ \"double_values_1\" ] ** 2 + row [ \"double_values_2\" ] ** 2 ), axis = 1 ) acc_data = acc_data . assign ( magnitude = magnitude . 
values ) if \"maxmagnitude\" in features_to_compute : acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () if \"minmagnitude\" in features_to_compute : acc_features [ \"minmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . min () if \"avgmagnitude\" in features_to_compute : acc_features [ \"avgmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . mean () if \"medianmagnitude\" in features_to_compute : acc_features [ \"medianmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . median () if \"stdmagnitude\" in features_to_compute : acc_features [ \"stdmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . std () acc_features = acc_features . reset_index () return acc_features","title":"Implement your feature extraction code"},{"location":"features/add-new-features/#new-features-for-non-existing-sensors","text":"If you want to add features for a device or a sensor that we do not support at the moment (those that do not appear in the \"Existing Sensors\" list above), contact us or request it on Slack and we can add the necessary code so you can follow the instructions above.","title":"New Features for Non-Existing Sensors"},{"location":"features/feature-introduction/","text":"Behavioral Features Introduction \u00b6 Every phone or Fitbit sensor has a corresponding config section in config.yaml , these sections follow a similar structure and we\u2019ll use PHONE_ACCELEROMETER as an example to explain this structure. Hint We recommend reading this page if you are using RAPIDS for the first time All computed sensor features are stored under /data/processed/features on files per sensor, per participant and per study (all participants). Every time you change any sensor parameters, provider parameters or provider features, all the necessary files will be updated as soon as you execute RAPIDS. Config section example for PHONE_ACCELEROMETER # 1) Config section PHONE_ACCELEROMETER : # 2) Parameters for PHONE_ACCELEROMETER TABLE : accelerometer # 3) Providers for PHONE_ACCELEROMETER PROVIDERS : # 4) RAPIDS provider RAPIDS : # 4.1) Parameters of RAPIDS provider of PHONE_ACCELEROMETER COMPUTE : False # 4.2) Features of RAPIDS provider of PHONE_ACCELEROMETER FEATURES : [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] SRC_FOLDER : \"rapids\" # inside src/features/phone_accelerometer SRC_LANGUAGE : \"python\" # 5) PANDA provider PANDA : # 5.1) Parameters of PANDA provider of PHONE_ACCELEROMETER COMPUTE : False VALID_SENSED_MINUTES : False # 5.2) Features of PANDA provider of PHONE_ACCELEROMETER FEATURES : exertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] nonexertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] SRC_FOLDER : \"panda\" # inside src/features/phone_accelerometer SRC_LANGUAGE : \"python\" Sensor Parameters \u00b6 Each sensor configuration section has a \u201cparameters\u201d subsection (see #2 in the example). These are parameters that affect different aspects of how the raw data is downloaded, and processed. The TABLE parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page. 
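As a quick way to see how a sensor's parameters and providers are laid out, the following minimal sketch (an illustration only, assuming PyYAML is available in your Python environment and that you run it from RAPIDS' root folder) loads config.yaml and prints the TABLE parameter and the COMPUTE flag of every PHONE_ACCELEROMETER provider:

```python
import yaml  # PyYAML; assumed to be installed in the RAPIDS Python environment

with open("config.yaml") as config_file:
    config = yaml.safe_load(config_file)

sensor = config["PHONE_ACCELEROMETER"]
print("TABLE:", sensor["TABLE"])

for provider_name, provider in sensor["PROVIDERS"].items():
    # Each provider exposes at least a COMPUTE flag and a FEATURES entry
    print(provider_name, "COMPUTE =", provider["COMPUTE"], "FEATURES =", provider.get("FEATURES"))
```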
Sensor Providers \u00b6 Each sensor configuration section can have zero, one or more behavioral feature providers (see #3 in the example). A provider is a script created by the core RAPIDS team or other researchers that extracts behavioral features for that sensor. In this example, accelerometer has two providers: RAPIDS (see #4 ) and PANDA (see #5 ). Provider Parameters \u00b6 Each provider has parameters that affect the computation of the behavioral features it offers (see #4.1 or #5.1 in the example). These parameters will include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page. Provider Features \u00b6 Each provider offers a set of behavioral features (see #4.2 or #5.2 in the example). For some providers these features are grouped in an array (like those for RAPIDS provider in #4.2 ) but for others they are grouped in a collection of arrays depending on the meaning and purpose of those features (like those for PANDAS provider in #5.2 ). In either case, you can delete the features you are not interested in and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page.","title":"Introduction"},{"location":"features/feature-introduction/#behavioral-features-introduction","text":"Every phone or Fitbit sensor has a corresponding config section in config.yaml , these sections follow a similar structure and we\u2019ll use PHONE_ACCELEROMETER as an example to explain this structure. Hint We recommend reading this page if you are using RAPIDS for the first time All computed sensor features are stored under /data/processed/features on files per sensor, per participant and per study (all participants). Every time you change any sensor parameters, provider parameters or provider features, all the necessary files will be updated as soon as you execute RAPIDS. Config section example for PHONE_ACCELEROMETER # 1) Config section PHONE_ACCELEROMETER : # 2) Parameters for PHONE_ACCELEROMETER TABLE : accelerometer # 3) Providers for PHONE_ACCELEROMETER PROVIDERS : # 4) RAPIDS provider RAPIDS : # 4.1) Parameters of RAPIDS provider of PHONE_ACCELEROMETER COMPUTE : False # 4.2) Features of RAPIDS provider of PHONE_ACCELEROMETER FEATURES : [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] SRC_FOLDER : \"rapids\" # inside src/features/phone_accelerometer SRC_LANGUAGE : \"python\" # 5) PANDA provider PANDA : # 5.1) Parameters of PANDA provider of PHONE_ACCELEROMETER COMPUTE : False VALID_SENSED_MINUTES : False # 5.2) Features of PANDA provider of PHONE_ACCELEROMETER FEATURES : exertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] nonexertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] SRC_FOLDER : \"panda\" # inside src/features/phone_accelerometer SRC_LANGUAGE : \"python\"","title":"Behavioral Features Introduction"},{"location":"features/feature-introduction/#sensor-parameters","text":"Each sensor configuration section has a \u201cparameters\u201d subsection (see #2 in the example). 
These are parameters that affect different aspects of how the raw data is downloaded, and processed. The TABLE parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page.","title":"Sensor Parameters"},{"location":"features/feature-introduction/#sensor-providers","text":"Each sensor configuration section can have zero, one or more behavioral feature providers (see #3 in the example). A provider is a script created by the core RAPIDS team or other researchers that extracts behavioral features for that sensor. In this example, accelerometer has two providers: RAPIDS (see #4 ) and PANDA (see #5 ).","title":"Sensor Providers"},{"location":"features/feature-introduction/#provider-parameters","text":"Each provider has parameters that affect the computation of the behavioral features it offers (see #4.1 or #5.1 in the example). These parameters will include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page.","title":"Provider Parameters"},{"location":"features/feature-introduction/#provider-features","text":"Each provider offers a set of behavioral features (see #4.2 or #5.2 in the example). For some providers these features are grouped in an array (like those for RAPIDS provider in #4.2 ) but for others they are grouped in a collection of arrays depending on the meaning and purpose of those features (like those for PANDAS provider in #5.2 ). In either case, you can delete the features you are not interested in and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page.","title":"Provider Features"},{"location":"features/fitbit-heartrate-intraday/","text":"Fitbit Heart Rate Intraday \u00b6 Sensor parameters description for [FITBIT_HEARTRATE_INTRADAY] : Key Description [TABLE] Database table name or file path where the heart rate intraday data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). 
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange 
a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange RAPIDS provider \u00b6 Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_heartrate_intraday_raw.csv - data/raw/ { pid } /fitbit_heartrate_intraday_parsed.csv - data/raw/ { pid } /fitbit_heartrate_intraday_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_intraday_features/fitbit_heartrate_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_intraday.csv Parameters description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate intraday data, see table below Features description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description maxhr beats/mins The maximum heart rate during a time segment. minhr beats/mins The minimum heart rate during a time segment. avghr beats/mins The average heart rate during a time segment. medianhr beats/mins The median of heart rate during a time segment. modehr beats/mins The mode of heart rate during a time segment. stdhr beats/mins The standard deviation of heart rate during a time segment. diffmaxmodehr beats/mins The difference between the maximum and mode heart rate during a time segment. diffminmodehr beats/mins The difference between the mode and minimum heart rate during a time segment. entropyhr nats Shannon\u2019s entropy measurement based on heart rate during a time segment. minutesonZONE minutes Number of minutes the user\u2019s heart rate fell within each heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed.","title":"Fitbit Heart Rate Intraday"},{"location":"features/fitbit-heartrate-intraday/#fitbit-heart-rate-intraday","text":"Sensor parameters description for [FITBIT_HEARTRATE_INTRADAY] : Key Description [TABLE] Database table name or file path where the heart rate intraday data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). 
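If you need to produce the PLAIN_TEXT format yourself (for example, because you parsed your participants' Fitbit API responses before adopting RAPIDS), a rough sketch of that conversion for one day of the intraday payload could look like the following. It is an illustration based on the JSON structure shown in these examples, not the parser RAPIDS uses internally, and it omits the heartrate_zone column, which would require comparing each reading against the zone boundaries listed under heartRateZones :

```python
import json

import pandas as pd


def intraday_json_to_plain_text(device_id, fitbit_data):
    # fitbit_data is the JSON string stored in the fitbit_data column for one day
    payload = json.loads(fitbit_data)
    date = payload["activities-heart"][0]["dateTime"]
    dataset = payload["activities-heart-intraday"]["dataset"]
    rows = [
        {
            "device_id": device_id,
            "local_date_time": f"{date} {point['time']}",
            "heartrate": point["value"],
        }
        for point in dataset
    ]
    return pd.DataFrame(rows)
```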
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange 
a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange","title":"Fitbit Heart Rate Intraday"},{"location":"features/fitbit-heartrate-intraday/#rapids-provider","text":"Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_heartrate_intraday_raw.csv - data/raw/ { pid } /fitbit_heartrate_intraday_parsed.csv - data/raw/ { pid } /fitbit_heartrate_intraday_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_intraday_features/fitbit_heartrate_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_intraday.csv Parameters description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate intraday data, see table below Features description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description maxhr beats/mins The maximum heart rate during a time segment. minhr beats/mins The minimum heart rate during a time segment. avghr beats/mins The average heart rate during a time segment. medianhr beats/mins The median of heart rate during a time segment. modehr beats/mins The mode of heart rate during a time segment. stdhr beats/mins The standard deviation of heart rate during a time segment. diffmaxmodehr beats/mins The difference between the maximum and mode heart rate during a time segment. diffminmodehr beats/mins The difference between the mode and minimum heart rate during a time segment. entropyhr nats Shannon\u2019s entropy measurement based on heart rate during a time segment. minutesonZONE minutes Number of minutes the user\u2019s heart rate fell within each heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed.","title":"RAPIDS provider"},{"location":"features/fitbit-heartrate-summary/","text":"Fitbit Heart Rate Summary \u00b6 Sensor parameters description for [FITBIT_HEARTRATE_SUMMARY] : Key Description [TABLE] Database table name or file path where the heart rate summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). 
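Before the source-data example below, here is a rough sketch of how the intraday [FEATURES] listed above could be derived from parsed minute-level rows (columns heartrate and heartrate_zone as in the PLAIN_TEXT layout). It is a simplified illustration, not the RAPIDS provider implementation.

```python
import numpy as np
import pandas as pd

HEARTRATE_ZONES = ["outofrange", "fatburn", "cardio", "peak"]

def heartrate_intraday_features(segment: pd.DataFrame) -> dict:
    """Illustrative per-time-segment features from minute-level heart rate rows."""
    hr = segment["heartrate"].astype(float)
    probs = hr.value_counts(normalize=True)
    features = {
        "maxhr": hr.max(),
        "minhr": hr.min(),
        "avghr": hr.mean(),
        "medianhr": hr.median(),
        "stdhr": hr.std(),
        # Shannon entropy (in nats) of the distribution of observed heart rate values
        "entropyhr": float(-(probs * np.log(probs)).sum()),
    }
    for zone in HEARTRATE_ZONES:
        # minutes the heart rate fell within each zone (one row per minute)
        features[f"minuteson{zone}"] = int((segment["heartrate_zone"] == zone).sum())
    return features
```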
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak 
a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0 RAPIDS provider \u00b6 Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_heartrate_summary_raw.csv - data/raw/ { pid } /fitbit_heartrate_summary_parsed.csv - data/raw/ { pid } /fitbit_heartrate_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_summary_features/fitbit_heartrate_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_summary.csv Parameters description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate summary data, see table below Features description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxrestinghr beats/mins The maximum daily resting heart rate during a time segment. minrestinghr beats/mins The minimum daily resting heart rate during a time segment. avgrestinghr beats/mins The average daily resting heart rate during a time segment. medianrestinghr beats/mins The median of daily resting heart rate during a time segment. moderestinghr beats/mins The mode of daily resting heart rate during a time segment. stdrestinghr beats/mins The standard deviation of daily resting heart rate during a time segment. diffmaxmoderestinghr beats/mins The difference between the maximum and mode daily resting heart rate during a time segment. diffminmoderestinghr beats/mins The difference between the mode and minimum daily resting heart rate during a time segment. entropyrestinghr nats Shannon\u2019s entropy measurement based on daily resting heart rate during a time segment. sumcaloriesZONE cals The total daily calories burned within heartrate_zone during a time segment. maxcaloriesZONE cals The maximum daily calories burned within heartrate_zone during a time segment. mincaloriesZONE cals The minimum daily calories burned within heartrate_zone during a time segment. avgcaloriesZONE cals The average daily calories burned within heartrate_zone during a time segment. mediancaloriesZONE cals The median of daily calories burned within heartrate_zone during a time segment. stdcaloriesZONE cals The standard deviation of daily calories burned within heartrate_zone during a time segment. entropycaloriesZONE nats Shannon\u2019s entropy measurement based on daily calories burned within heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed. Calories\u2019 accuracy depends on the users\u2019 Fitbit profile (weight, height, etc.).","title":"Fitbit Heart Rate Summary"},{"location":"features/fitbit-heartrate-summary/#fitbit-heart-rate-summary","text":"Sensor parameters description for [FITBIT_HEARTRATE_SUMMARY] : Key Description [TABLE] Database table name or file path where the heart rate summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. 
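The entropy features above (entropyhr for intraday data, entropyrestinghr and entropycaloriesZONE for summary data) are described as Shannon's entropy in nats. A plausible reading, assuming the observed values are treated as a discrete distribution with empirical probabilities p_i, is:

$$ H = -\sum_i p_i \ln p_i $$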
The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0","title":"Fitbit Heart Rate Summary"},{"location":"features/fitbit-heartrate-summary/#rapids-provider","text":"Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_heartrate_summary_raw.csv - data/raw/ { pid } /fitbit_heartrate_summary_parsed.csv - data/raw/ { pid } /fitbit_heartrate_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_summary_features/fitbit_heartrate_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_summary.csv Parameters description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate summary data, see table below Features description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxrestinghr beats/mins The maximum daily resting heart rate during a time segment. minrestinghr beats/mins The minimum daily resting heart rate during a time segment. avgrestinghr beats/mins The average daily resting heart rate during a time segment. medianrestinghr beats/mins The median of daily resting heart rate during a time segment. moderestinghr beats/mins The mode of daily resting heart rate during a time segment. stdrestinghr beats/mins The standard deviation of daily resting heart rate during a time segment. diffmaxmoderestinghr beats/mins The difference between the maximum and mode daily resting heart rate during a time segment. diffminmoderestinghr beats/mins The difference between the mode and minimum daily resting heart rate during a time segment. entropyrestinghr nats Shannon\u2019s entropy measurement based on daily resting heart rate during a time segment. sumcaloriesZONE cals The total daily calories burned within heartrate_zone during a time segment. maxcaloriesZONE cals The maximum daily calories burned within heartrate_zone during a time segment. mincaloriesZONE cals The minimum daily calories burned within heartrate_zone during a time segment. avgcaloriesZONE cals The average daily calories burned within heartrate_zone during a time segment. mediancaloriesZONE cals The median of daily calories burned within heartrate_zone during a time segment. stdcaloriesZONE cals The standard deviation of daily calories burned within heartrate_zone during a time segment. 
entropycaloriesZONE nats Shannon\u2019s entropy measurement based on daily calories burned within heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed. Calories\u2019 accuracy depends on the users\u2019 Fitbit profile (weight, height, etc.).","title":"RAPIDS provider"},{"location":"features/fitbit-sleep-summary/","text":"Fitbit Sleep Summary \u00b6 Sensor parameters description for [FITBIT_SLEEP_SUMMARY] : Key Description [TABLE] Database table name or file path where the sleep summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data with Fitbit\u2019s sleep API Version 1 JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 2, \u201cawakeDuration\u201d: 3, \u201cawakeningsCount\u201d: 10, \u201cdateOfSleep\u201d: \u201c2020-10-07\u201d, \u201cduration\u201d: 8100000, \u201cefficiency\u201d: 91, \u201cendTime\u201d: \u201c2020-10-07T18:10:00.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14147921940, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c15:55:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c15:56:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c15:57:00\u201d, \u201cvalue\u201d: \u201c2\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 123, \u201cminutesAwake\u201d: 12, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 8, \u201crestlessDuration\u201d: 9, \u201cstartTime\u201d: \u201c2020-10-07T15:55:00.000\u201d, \u201ctimeInBed\u201d: 135}, {\u201cawakeCount\u201d: 0, \u201cawakeDuration\u201d: 0, \u201cawakeningsCount\u201d: 1, \u201cdateOfSleep\u201d: \u201c2020-10-07\u201d, \u201cduration\u201d: 3780000, \u201cefficiency\u201d: 100, \u201cendTime\u201d: \u201c2020-10-07T10:52:30.000\u201d, \u201cisMainSleep\u201d: false, \u201clogId\u201d: 14144903977, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c09:49:00\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c09:50:00\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c09:51:00\u201d, \u201cvalue\u201d: \u201c1\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 1, \u201cminutesAsleep\u201d: 62, \u201cminutesAwake\u201d: 0, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 1, \u201crestlessDuration\u201d: 1, \u201cstartTime\u201d: \u201c2020-10-07T09:49:00.000\u201d, \u201ctimeInBed\u201d: 63}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 185, \u201ctotalSleepRecords\u201d: 2, \u201ctotalTimeInBed\u201d: 198}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 3, 
\u201cawakeDuration\u201d: 21, \u201cawakeningsCount\u201d: 16, \u201cdateOfSleep\u201d: \u201c2020-10-08\u201d, \u201cduration\u201d: 19260000, \u201cefficiency\u201d: 89, \u201cendTime\u201d: \u201c2020-10-08T06:01:30.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14150613895, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c00:40:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c00:41:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c00:42:00\u201d, \u201cvalue\u201d: \u201c3\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 275, \u201cminutesAwake\u201d: 33, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 13, \u201crestlessDuration\u201d: 25, \u201cstartTime\u201d: \u201c2020-10-08T00:40:00.000\u201d, \u201ctimeInBed\u201d: 321}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 275, \u201ctotalSleepRecords\u201d: 1, \u201ctotalTimeInBed\u201d: 321}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 1, \u201cawakeDuration\u201d: 3, \u201cawakeningsCount\u201d: 8, \u201cdateOfSleep\u201d: \u201c2020-10-09\u201d, \u201cduration\u201d: 19320000, \u201cefficiency\u201d: 96, \u201cendTime\u201d: \u201c2020-10-09T05:57:30.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14161136803, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c00:35:30\u201d, \u201cvalue\u201d: \u201c2\u201d}, {\u201cdateTime\u201d: \u201c00:36:30\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c00:37:30\u201d, \u201cvalue\u201d: \u201c1\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 309, \u201cminutesAwake\u201d: 13, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 7, \u201crestlessDuration\u201d: 10, \u201cstartTime\u201d: \u201c2020-10-09T00:35:30.000\u201d, \u201ctimeInBed\u201d: 322}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 309, \u201ctotalSleepRecords\u201d: 1, \u201ctotalTimeInBed\u201d: 322}} PLAIN_TEXT device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type count_awake duration_awake count_awakenings count_restless duration_restless a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 15:55:00 2020-10-07 18:10:00 91 0 123 12 0 135 1 classic 2 3 10 8 9 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 09:49:00 2020-10-07 10:52:30 100 1 62 0 0 63 0 classic 0 0 1 1 1 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 00:40:00 2020-10-08 06:01:30 89 0 275 33 0 321 1 classic 3 21 16 13 25 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 00:35:30 2020-10-09 05:57:30 96 0 309 13 0 322 1 classic 1 3 8 7 10 Example of the structure of source data with Fitbit\u2019s sleep API Version 1.2 JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], \u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], \u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} PLAIN_TEXT device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages RAPIDS provider \u00b6 Available time segments Only available for segments that span 1 or more complete days (e.g. 
Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_sleep_summary_raw.csv - data/raw/ { pid } /fitbit_sleep_summary_parsed.csv - data/raw/ { pid } /fitbit_sleep_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_summary_features/fitbit_sleep_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_summary.csv Parameters description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_SUMMARY features from the RAPIDS provider [SLEEP_TYPES] Types of sleep to be included in the feature extraction computation. Fitbit provides 3 types of sleep: main , nap , all . [FEATURES] Features to be computed from sleep summary data, see table below Features description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description countepisodeTYPE episodes Number of sleep episodes for a certain sleep type during a time segment. avgefficiencyTYPE scores Average sleep efficiency for a certain sleep type during a time segment. sumdurationafterwakeupTYPE minutes Total duration the user stayed in bed after waking up for a certain sleep type during a time segment. sumdurationasleepTYPE minutes Total sleep duration for a certain sleep type during a time segment. sumdurationawakeTYPE minutes Total duration the user stayed awake but still in bed for a certain sleep type during a time segment. sumdurationtofallasleepTYPE minutes Total duration the user spent to fall asleep for a certain sleep type during a time segment. sumdurationinbedTYPE minutes Total duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. avgdurationafterwakeupTYPE minutes Average duration the user stayed in bed after waking up for a certain sleep type during a time segment. avgdurationasleepTYPE minutes Average sleep duration for a certain sleep type during a time segment. avgdurationawakeTYPE minutes Average duration the user stayed awake but still in bed for a certain sleep type during a time segment. avgdurationtofallasleepTYPE minutes Average duration the user spent to fall asleep for a certain sleep type during a time segment. avgdurationinbedTYPE minutes Average duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. Assumptions/Observations There are three sleep types (TYPE): main , nap , all . The all type contains both main sleep and naps. There are two versions of Fitbit\u2019s sleep API ( version 1 and version 1.2 ), and each provides raw sleep data in a different format: Count & duration summaries . v1 contains count_awake , duration_awake , count_awakenings , count_restless , and duration_restless fields for every sleep record but v1.2 does not. API columns . Features are computed based on the values provided by Fitbit\u2019s API: efficiency , minutes_after_wakeup , minutes_asleep , minutes_awake , minutes_to_fall_asleep , minutes_in_bed , is_main_sleep and type .","title":"Fitbit Sleep Summary"},{"location":"features/fitbit-sleep-summary/#fitbit-sleep-summary","text":"Sensor parameters description for [FITBIT_SLEEP_SUMMARY] : Key Description [TABLE] Database table name or file path where the sleep summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. 
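To make the per-type features above concrete, the sketch below shows one way they could be computed from parsed sleep episodes, using the PLAIN_TEXT columns (is_main_sleep, efficiency, minutes_asleep, minutes_awake, minutes_to_fall_asleep, minutes_after_wakeup, minutes_in_bed). It is an illustrative simplification, not the RAPIDS provider code.

```python
import pandas as pd

def sleep_summary_features(episodes: pd.DataFrame, sleep_type: str) -> dict:
    """Illustrative per-segment features for one sleep type (main, nap, or all)."""
    if sleep_type == "main":
        episodes = episodes[episodes["is_main_sleep"] == 1]
    elif sleep_type == "nap":
        episodes = episodes[episodes["is_main_sleep"] == 0]
    # sleep_type == "all" keeps both main sleep episodes and naps
    return {
        f"countepisode{sleep_type}": int(len(episodes)),
        f"avgefficiency{sleep_type}": episodes["efficiency"].mean(),
        f"sumdurationtofallasleep{sleep_type}": episodes["minutes_to_fall_asleep"].sum(),
        f"sumdurationawake{sleep_type}": episodes["minutes_awake"].sum(),
        f"sumdurationasleep{sleep_type}": episodes["minutes_asleep"].sum(),
        f"sumdurationafterwakeup{sleep_type}": episodes["minutes_after_wakeup"].sum(),
        # equals the sum of the four duration components above
        f"sumdurationinbed{sleep_type}": episodes["minutes_in_bed"].sum(),
    }
```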
The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data with Fitbit\u2019s sleep API Version 1 JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 2, \u201cawakeDuration\u201d: 3, \u201cawakeningsCount\u201d: 10, \u201cdateOfSleep\u201d: \u201c2020-10-07\u201d, \u201cduration\u201d: 8100000, \u201cefficiency\u201d: 91, \u201cendTime\u201d: \u201c2020-10-07T18:10:00.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14147921940, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c15:55:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c15:56:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c15:57:00\u201d, \u201cvalue\u201d: \u201c2\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 123, \u201cminutesAwake\u201d: 12, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 8, \u201crestlessDuration\u201d: 9, \u201cstartTime\u201d: \u201c2020-10-07T15:55:00.000\u201d, \u201ctimeInBed\u201d: 135}, {\u201cawakeCount\u201d: 0, \u201cawakeDuration\u201d: 0, \u201cawakeningsCount\u201d: 1, \u201cdateOfSleep\u201d: \u201c2020-10-07\u201d, \u201cduration\u201d: 3780000, \u201cefficiency\u201d: 100, \u201cendTime\u201d: \u201c2020-10-07T10:52:30.000\u201d, \u201cisMainSleep\u201d: false, \u201clogId\u201d: 14144903977, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c09:49:00\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c09:50:00\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c09:51:00\u201d, \u201cvalue\u201d: \u201c1\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 1, \u201cminutesAsleep\u201d: 62, \u201cminutesAwake\u201d: 0, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 1, \u201crestlessDuration\u201d: 1, \u201cstartTime\u201d: \u201c2020-10-07T09:49:00.000\u201d, \u201ctimeInBed\u201d: 63}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 185, \u201ctotalSleepRecords\u201d: 2, \u201ctotalTimeInBed\u201d: 198}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 3, \u201cawakeDuration\u201d: 21, \u201cawakeningsCount\u201d: 16, \u201cdateOfSleep\u201d: \u201c2020-10-08\u201d, \u201cduration\u201d: 19260000, \u201cefficiency\u201d: 89, \u201cendTime\u201d: \u201c2020-10-08T06:01:30.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14150613895, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c00:40:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c00:41:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c00:42:00\u201d, \u201cvalue\u201d: \u201c3\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 275, \u201cminutesAwake\u201d: 33, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 13, \u201crestlessDuration\u201d: 25, \u201cstartTime\u201d: 
\u201c2020-10-08T00:40:00.000\u201d, \u201ctimeInBed\u201d: 321}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 275, \u201ctotalSleepRecords\u201d: 1, \u201ctotalTimeInBed\u201d: 321}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 1, \u201cawakeDuration\u201d: 3, \u201cawakeningsCount\u201d: 8, \u201cdateOfSleep\u201d: \u201c2020-10-09\u201d, \u201cduration\u201d: 19320000, \u201cefficiency\u201d: 96, \u201cendTime\u201d: \u201c2020-10-09T05:57:30.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14161136803, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c00:35:30\u201d, \u201cvalue\u201d: \u201c2\u201d}, {\u201cdateTime\u201d: \u201c00:36:30\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c00:37:30\u201d, \u201cvalue\u201d: \u201c1\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 309, \u201cminutesAwake\u201d: 13, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 7, \u201crestlessDuration\u201d: 10, \u201cstartTime\u201d: \u201c2020-10-09T00:35:30.000\u201d, \u201ctimeInBed\u201d: 322}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 309, \u201ctotalSleepRecords\u201d: 1, \u201ctotalTimeInBed\u201d: 322}} PLAIN_TEXT device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type count_awake duration_awake count_awakenings count_restless duration_restless a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 15:55:00 2020-10-07 18:10:00 91 0 123 12 0 135 1 classic 2 3 10 8 9 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 09:49:00 2020-10-07 10:52:30 100 1 62 0 0 63 0 classic 0 0 1 1 1 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 00:40:00 2020-10-08 06:01:30 89 0 275 33 0 321 1 classic 3 21 16 13 25 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 00:35:30 2020-10-09 05:57:30 96 0 309 13 0 322 1 classic 1 3 8 7 10 Example of the structure of source data with Fitbit\u2019s sleep API Version 1.2 JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], \u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], \u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} PLAIN_TEXT device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages","title":"Fitbit Sleep Summary"},{"location":"features/fitbit-sleep-summary/#rapids-provider","text":"Available time segments Only available for segments that span 1 or more complete days (e.g. 
Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_sleep_summary_raw.csv - data/raw/ { pid } /fitbit_sleep_summary_parsed.csv - data/raw/ { pid } /fitbit_sleep_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_summary_features/fitbit_sleep_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_summary.csv Parameters description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_SUMMARY features from the RAPIDS provider [SLEEP_TYPES] Types of sleep to be included in the feature extraction computation. Fitbit provides 3 types of sleep: main , nap , all . [FEATURES] Features to be computed from sleep summary data, see table below Features description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description countepisodeTYPE episodes Number of sleep episodes for a certain sleep type during a time segment. avgefficiencyTYPE scores Average sleep efficiency for a certain sleep type during a time segment. sumdurationafterwakeupTYPE minutes Total duration the user stayed in bed after waking up for a certain sleep type during a time segment. sumdurationasleepTYPE minutes Total sleep duration for a certain sleep type during a time segment. sumdurationawakeTYPE minutes Total duration the user stayed awake but still in bed for a certain sleep type during a time segment. sumdurationtofallasleepTYPE minutes Total duration the user spent to fall asleep for a certain sleep type during a time segment. sumdurationinbedTYPE minutes Total duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. avgdurationafterwakeupTYPE minutes Average duration the user stayed in bed after waking up for a certain sleep type during a time segment. avgdurationasleepTYPE minutes Average sleep duration for a certain sleep type during a time segment. avgdurationawakeTYPE minutes Average duration the user stayed awake but still in bed for a certain sleep type during a time segment. avgdurationtofallasleepTYPE minutes Average duration the user spent to fall asleep for a certain sleep type during a time segment. avgdurationinbedTYPE minutes Average duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. Assumptions/Observations There are three sleep types (TYPE): main , nap , all . The all type contains both main sleep and naps. There are two versions of Fitbit\u2019s sleep API ( version 1 and version 1.2 ), and each provides raw sleep data in a different format: Count & duration summaries . v1 contains count_awake , duration_awake , count_awakenings , count_restless , and duration_restless fields for every sleep record but v1.2 does not. API columns . Features are computed based on the values provided by Fitbit\u2019s API: efficiency , minutes_after_wakeup , minutes_asleep , minutes_awake , minutes_to_fall_asleep , minutes_in_bed , is_main_sleep and type .","title":"RAPIDS provider"},{"location":"features/fitbit-steps-intraday/","text":"Fitbit Steps Intraday \u00b6 Sensor parameters description for [FITBIT_STEPS_INTRADAY] : Key Description [TABLE] Database table name or file path where the steps intraday data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. 
The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0 RAPIDS provider \u00b6 Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_steps_intraday_raw.csv - data/raw/ { pid } /fitbit_steps_intraday_parsed.csv - data/raw/ { pid } /fitbit_steps_intraday_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_steps_intraday_features/fitbit_steps_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_intraday.csv Parameters description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from steps intraday data, see table below [THRESHOLD_ACTIVE_BOUT] Every minute with Fitbit steps data will be labelled as sedentary if its step count is below this threshold, otherwise, active . [INCLUDE_ZERO_STEP_ROWS] Whether or not to include time segments with a 0 step count during the whole day. Features description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description sumsteps steps The total step count during a time segment. maxsteps steps The maximum step count during a time segment. minsteps steps The minimum step count during a time segment.
avgsteps steps The average step count during a time segment. stdsteps steps The standard deviation of step count during a time segment. countepisodesedentarybout bouts Number of sedentary bouts during a time segment. sumdurationsedentarybout minutes Total duration of all sedentary bouts during a time segment. maxdurationsedentarybout minutes The maximum duration of any sedentary bout during a time segment. mindurationsedentarybout minutes The minimum duration of any sedentary bout during a time segment. avgdurationsedentarybout minutes The average duration of sedentary bouts during a time segment. stddurationsedentarybout minutes The standard deviation of the duration of sedentary bouts during a time segment. countepisodeactivebout bouts Number of active bouts during a time segment. sumdurationactivebout minutes Total duration of all active bouts during a time segment. maxdurationactivebout minutes The maximum duration of any active bout during a time segment. mindurationactivebout minutes The minimum duration of any active bout during a time segment. avgdurationactivebout minutes The average duration of active bouts during a time segment. stddurationactivebout minutes The standard deviation of the duration of active bouts during a time segment. Assumptions/Observations Active and sedentary bouts . If the step count per minute is smaller than THRESHOLD_ACTIVE_BOUT (default value is 10), that minute is labelled as sedentary, otherwise, it is labelled as active. Active and sedentary bouts are periods of consecutive minutes labelled as active or sedentary .","title":"Fitbit Steps Intraday"},{"location":"features/fitbit-steps-intraday/#fitbit-steps-intraday","text":"Sensor parameters description for [FITBIT_STEPS_INTRADAY] : Key Description [TABLE] Database table name or file path where the steps intraday data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity).
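Before the source-data example below, the per-minute labelling and bout grouping described in the assumptions above can be sketched as follows, with THRESHOLD_ACTIVE_BOUT defaulting to 10 as stated; this illustrates the idea and is not the actual RAPIDS provider code.

```python
import pandas as pd

def step_bout_features(minutes: pd.DataFrame, threshold_active_bout: int = 10) -> dict:
    """Illustrative bout features from minute-level step counts (column `steps`)."""
    steps = minutes["steps"].astype(int)
    # A minute is sedentary if its step count is below the threshold, active otherwise
    label = (steps >= threshold_active_bout).map({True: "active", False: "sedentary"})
    # Consecutive minutes with the same label form one bout
    bout_id = (label != label.shift()).cumsum()
    bouts = label.groupby(bout_id).agg(["first", "size"])
    features = {}
    for kind in ("sedentary", "active"):
        durations = bouts.loc[bouts["first"] == kind, "size"]
        features[f"countepisode{kind}bout"] = int(len(durations))
        features[f"sumduration{kind}bout"] = int(durations.sum())
        features[f"avgduration{kind}bout"] = float(durations.mean()) if len(durations) else 0.0
        features[f"maxduration{kind}bout"] = int(durations.max()) if len(durations) else 0
        features[f"minduration{kind}bout"] = int(durations.min()) if len(durations) else 0
    return features
```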
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0","title":"Fitbit Steps Intraday"},{"location":"features/fitbit-steps-intraday/#rapids-provider","text":"Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_steps_intraday_raw.csv - data/raw/ { pid } /fitbit_steps_intraday_parsed.csv - data/raw/ { pid } /fitbit_steps_intraday_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_steps_intraday_features/fitbit_steps_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_intraday.csv Parameters description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from steps intraday data, see table below [THRESHOLD_ACTIVE_BOUT] Every minute with Fitbit steps data wil be labelled as sedentary if its step count is below this threshold, otherwise, active . [INCLUDE_ZERO_STEP_ROWS] Whether or not to include time segments with a 0 step count during the whole day. Features description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description sumsteps steps The total step count during a time segment. maxsteps steps The maximum step count during a time segment. minsteps steps The minimum step count during a time segment. avgsteps steps The average step count during a time segment. stdsteps steps The standard deviation of step count during a time segment. countepisodesedentarybout bouts Number of sedentary bouts during a time segment. sumdurationsedentarybout minutes Total duration of all sedentary bouts during a time segment. maxdurationsedentarybout minutes The maximum duration of any sedentary bout during a time segment. mindurationsedentarybout minutes The minimum duration of any sedentary bout during a time segment. 
avgdurationsedentarybout minutes The average duration of sedentary bouts during a time segment. stddurationsedentarybout minutes The standard deviation of the duration of sedentary bouts during a time segment. countepisodeactivebout bouts Number of active bouts during a time segment. sumdurationactivebout minutes Total duration of all active bouts during a time segment. maxdurationactivebout minutes The maximum duration of any active bout during a time segment. mindurationactivebout minutes The minimum duration of any active bout during a time segment. avgdurationactivebout minutes The average duration of active bouts during a time segment. stddurationactivebout minutes The standard deviation of the duration of active bouts during a time segment. Assumptions/Observations Active and sedentary bouts . If the step count per minute is smaller than THRESHOLD_ACTIVE_BOUT (default value is 10), that minute is labelled as sedentary; otherwise, it is labelled as active. Active and sedentary bouts are periods of consecutive minutes labelled as active or sedentary .","title":"RAPIDS provider"},{"location":"features/fitbit-steps-summary/","text":"Fitbit Steps Summary \u00b6 Sensor parameters description for [FITBIT_STEPS_SUMMARY] : Key Description [TABLE] Database table name or file path where the steps summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects; note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity).
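If you only need the daily totals in PLAIN_TEXT format, a similarly hedged sketch (again assuming pandas, a hypothetical CSV export of your [TABLE], and one JSON object per row in the fitbit_data column) keeps just the activities-steps entry of each record; it is an illustration, not the parser RAPIDS ships with.

```python
# Minimal sketch (not the RAPIDS parser): keep only the daily step totals from
# the JSON fitbit_data column (device_id, local_date_time, steps).
import json
import pandas as pd

raw = pd.read_csv("fitbit_steps_summary_raw.csv")  # hypothetical export of [TABLE]

def daily_total(record):
    entry = json.loads(record["fitbit_data"])["activities-steps"][0]
    return {
        "device_id": record["device_id"],
        "local_date_time": entry["dateTime"],
        "steps": int(entry["value"]),
    }

daily = pd.DataFrame([daily_total(record) for _, record in raw.iterrows()])
print(daily.head())
```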
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998 RAPIDS provider \u00b6 Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_steps_summary_raw.csv - data/raw/ { pid } /fitbit_steps_summary_parsed.csv - data/raw/ { pid } /fitbit_steps_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_steps_summary_features/fitbit_steps_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_summary.csv Parameters description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from steps summary data, see table below Features description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxsumsteps steps The maximum daily step count during a time segment. minsumsteps steps The minimum daily step count during a time segment. avgsumsteps steps The average daily step count during a time segment. mediansumsteps steps The median of daily step count during a time segment. stdsumsteps steps The standard deviation of daily step count during a time segment. Assumptions/Observations NA","title":"Fitbit Steps Summary"},{"location":"features/fitbit-steps-summary/#fitbit-steps-summary","text":"Sensor parameters description for [FITBIT_STEPS_SUMMARY] : Key Description [TABLE] Database table name or file path where the steps summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. 
We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998","title":"Fitbit Steps Summary"},{"location":"features/fitbit-steps-summary/#rapids-provider","text":"Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_steps_summary_raw.csv - data/raw/ { pid } /fitbit_steps_summary_parsed.csv - data/raw/ { pid } /fitbit_steps_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_steps_summary_features/fitbit_steps_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_summary.csv Parameters description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from steps summary data, see table below Features description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxsumsteps steps The maximum daily step count during a time segment. minsumsteps steps The minimum daily step count during a time segment. avgsumsteps steps The average daily step count during a time segment. mediansumsteps steps The median of daily step count during a time segment. stdsumsteps steps The standard deviation of daily step count during a time segment. 
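As a rough illustration of what these aggregates represent (not the RAPIDS implementation), the sketch below computes them with pandas from daily step totals that have already been assigned to a time segment; the column names are hypothetical.

```python
# Illustration only (not the RAPIDS code): steps summary features per time
# segment from daily totals. Assumed columns: local_segment, sumsteps.
import pandas as pd

daily = pd.DataFrame({
    "local_segment": ["week1", "week1", "week1", "week2", "week2"],
    "sumsteps": [1775, 3201, 998, 5400, 4300],
})

features = daily.groupby("local_segment")["sumsteps"].agg(
    maxsumsteps="max",
    minsumsteps="min",
    avgsumsteps="mean",
    mediansumsteps="median",
    stdsumsteps="std",
).reset_index()
print(features)
```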
Assumptions/Observations NA","title":"RAPIDS provider"},{"location":"features/phone-accelerometer/","text":"Phone Accelerometer \u00b6 Sensor parameters description for [PHONE_ACCELEROMETER] : Key Description [TABLE] Database table where the accelerometer data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Feature Units Description maxmagnitude m/s 2 The maximum magnitude of acceleration ( \\(\\|acceleration\\| = \\sqrt{x^2 + y^2 + z^2}\\) ). minmagnitude m/s 2 The minimum magnitude of acceleration. avgmagnitude m/s 2 The average magnitude of acceleration. medianmagnitude m/s 2 The median magnitude of acceleration. stdmagnitude m/s 2 The standard deviation of acceleration. Assumptions/Observations Analyzing accelerometer data is a memory-intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem. PANDA provider \u00b6 These features are based on the work by Panda et al . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the PANDA provider [FEATURES] Features to be computed for exertional and non-exertional activity episodes, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Feature Units Description sumduration minutes Total duration of all exertional or non-exertional activity episodes. maxduration minutes Longest duration of any exertional or non-exertional activity episode. minduration minutes Shortest duration of any exertional or non-exertional activity episode. avgduration minutes Average duration of any exertional or non-exertional activity episode. medianduration minutes Median duration of any exertional or non-exertional activity episode. stdduration minutes Standard deviation of the duration of all exertional or non-exertional activity episodes. Assumptions/Observations Analyzing accelerometer data is a memory-intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem.
See Panda et al for a definition of exertional and non-exertional activity episodes","title":"Phone Accelerometer"},{"location":"features/phone-accelerometer/#phone-accelerometer","text":"Sensor parameters description for [PHONE_ACCELEROMETER] : Key Description [TABLE] Database table where the accelerometer data is stored","title":"Phone Accelerometer"},{"location":"features/phone-accelerometer/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Feature Units Description maxmagnitude m/s 2 The maximum magnitude of acceleration ( \\(\\|acceleration\\| = \\sqrt{x^2 + y^2 + z^2}\\) ). minmagnitude m/s 2 The minimum magnitude of acceleration. avgmagnitude m/s 2 The average magnitude of acceleration. medianmagnitude m/s 2 The median magnitude of acceleration. stdmagnitude m/s 2 The standard deviation of acceleration. Assumptions/Observations Analyzing accelerometer data is a memory-intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem.","title":"RAPIDS provider"},{"location":"features/phone-accelerometer/#panda-provider","text":"These features are based on the work by Panda et al . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the PANDA provider [FEATURES] Features to be computed for exertional and non-exertional activity episodes, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Feature Units Description sumduration minutes Total duration of all exertional or non-exertional activity episodes. maxduration minutes Longest duration of any exertional or non-exertional activity episode. minduration minutes Shortest duration of any exertional or non-exertional activity episode. avgduration minutes Average duration of any exertional or non-exertional activity episode. medianduration minutes Median duration of any exertional or non-exertional activity episode. stdduration minutes Standard deviation of the duration of all exertional or non-exertional activity episodes. Assumptions/Observations Analyzing accelerometer data is a memory-intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem.
See Panda et al for a definition of exertional and non-exertional activity episodes","title":"PANDA provider"},{"location":"features/phone-activity-recognition/","text":"Phone Activity Recognition \u00b6 Sensor parameters description for [PHONE_ACTIVITY_RECOGNITION] : Key Description [TABLE][ANDROID] Database table where the activity data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [TABLE][IOS] Database table where the activity data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same activity episode RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_activity_recognition_raw.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime_unified.csv - data/interim/ { pid } /phone_activity_recognition_episodes.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_activity_recognition_features/phone_activity_recognition_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_activity_recognition.csv Parameters description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACTIVITY_RECOGNITION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [ACTIVITY_CLASSES][STATIONARY] An array of the activity labels to be considered in the STATIONARY category choose any of still , tilting [ACTIVITY_CLASSES][MOBILE] An array of the activity labels to be considered in the MOBILE category choose any of on_foot , walking , running , on_bicycle [ACTIVITY_CLASSES][VEHICLE] An array of the activity labels to be considered in the VEHICLE category choose any of in_vehicule Features description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of episodes. mostcommonactivity activity type The most common activity type (e.g. still , on_foot , etc.). If there is a tie, the first one is chosen. countuniqueactivities activity type Number of unique activities. 
durationstationary minutes The total duration of [ACTIVITY_CLASSES][STATIONARY] episodes durationmobile minutes The total duration of [ACTIVITY_CLASSES][MOBILE] episodes of on foot, running, and on bicycle activities durationvehicle minutes The total duration of [ACTIVITY_CLASSES][VEHICLE] episodes of on vehicle activity Assumptions/Observations iOS Activity Recognition names and types are unified with Android labels: iOS Activity Name Android Activity Name Android Activity Type walking walking 7 running running 8 cycling on_bicycle 1 automotive in_vehicle 0 stationary still 3 unknown unknown 4 In AWARE, Activity Recognition data for Android and iOS are stored in two different database tables, RAPIDS automatically infers what platform each participant belongs to based on their participant file .","title":"Phone Activity Recognition"},{"location":"features/phone-activity-recognition/#phone-activity-recognition","text":"Sensor parameters description for [PHONE_ACTIVITY_RECOGNITION] : Key Description [TABLE][ANDROID] Database table where the activity data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [TABLE][IOS] Database table where the activity data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same activity episode","title":"Phone Activity Recognition"},{"location":"features/phone-activity-recognition/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_activity_recognition_raw.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime_unified.csv - data/interim/ { pid } /phone_activity_recognition_episodes.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_activity_recognition_features/phone_activity_recognition_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_activity_recognition.csv Parameters description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACTIVITY_RECOGNITION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [ACTIVITY_CLASSES][STATIONARY] An array of the activity labels to be considered in the STATIONARY category choose any of still , tilting [ACTIVITY_CLASSES][MOBILE] An array of the activity labels to be considered in the MOBILE category choose any of on_foot , walking , running , on_bicycle [ACTIVITY_CLASSES][VEHICLE] An array of the activity labels to be considered in the VEHICLE category choose any of in_vehicule Features description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of episodes. mostcommonactivity activity type The most common activity type (e.g. still , on_foot , etc.). If there is a tie, the first one is chosen. countuniqueactivities activity type Number of unique activities. 
durationstationary minutes The total duration of [ACTIVITY_CLASSES][STATIONARY] episodes durationmobile minutes The total duration of [ACTIVITY_CLASSES][MOBILE] episodes of on foot, running, and on bicycle activities durationvehicle minutes The total duration of [ACTIVITY_CLASSES][VEHICLE] episodes of on vehicle activity Assumptions/Observations iOS Activity Recognition names and types are unified with Android labels: iOS Activity Name Android Activity Name Android Activity Type walking walking 7 running running 8 cycling on_bicycle 1 automotive in_vehicle 0 stationary still 3 unknown unknown 4 In AWARE, Activity Recognition data for Android and iOS are stored in two different database tables; RAPIDS automatically infers what platform each participant belongs to based on their participant file .","title":"RAPIDS provider"},{"location":"features/phone-applications-foreground/","text":"Phone Applications Foreground \u00b6 Sensor parameters description for [PHONE_APPLICATIONS_FOREGROUND] (these parameters are used by the only provider available at the moment, RAPIDS): Key Description [TABLE] Database table where the applications foreground data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scraped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] If [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] ; if [CATALOGUE_SOURCE] is equal to GOOGLE , all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored) RAPIDS provider \u00b6 The app category (genre) catalogue used in these features was originally created by Stachl et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_applications_foreground_raw.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime_with_categories.csv - data/interim/ { pid } /phone_applications_foreground_features/phone_applications_foreground_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_applications_foreground.csv Parameters description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_APPLICATIONS_FOREGROUND features from the RAPIDS provider [FEATURES] Features to be computed, see table below [SINGLE_CATEGORIES] An array of app categories to be included in the feature extraction computation. The special keyword all represents a category with all the apps from each participant. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [MULTIPLE_CATEGORIES] An array of collections representing meta-categories (a group of categories). The key of each element is the name of the meta-category and the value is an array of member app categories.
By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [SINGLE_APPS] An array of apps to be included in the feature extraction computation. Use their package name (e.g. com.google.android.youtube ) or the reserved keyword top1global (the most used app by a participant over the whole monitoring study) [EXCLUDED_CATEGORIES] An array of app categories to be excluded from the feature extraction computation. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [EXCLUDED_APPS] An array of apps to be excluded from the feature extraction computation. Use their package name, for example: com.google.android.youtube Features description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Feature Units Description count apps Number of times a single app or apps within a category were used (i.e. they were brought to the foreground either by tapping their icon or switching to it from another app) timeoffirstuse minutes The time in minutes between 12:00am (midnight) and the first use of a single app or apps within a category during a time_segment timeoflastuse minutes The time in minutes between 12:00am (midnight) and the last use of a single app or apps within a category during a time_segment frequencyentropy nats The entropy of the used apps within a category during a time_segment (each app is seen as a unique event, the more apps were used, the higher the entropy). This is especially relevant when computed over all apps. Entropy cannot be obtained for a single app Assumptions/Observations Features can be computed by app, by apps grouped under a single category (genre) and by multiple categories grouped together (meta-categories). For example, we can get features for Facebook (single app), for Social Network apps (a category including Facebook and other social media apps) or for Social (a meta-category formed by Social Network and Social Media Tools categories). Apps installed by default like YouTube are considered system apps on some phones. We do an exact match to exclude apps where \u201cgenre\u201d == EXCLUDED_CATEGORIES or \u201cpackage_name\u201d == EXCLUDED_APPS . We provide three ways of classifying an app within a category (genre): a) by automatically scraping its official category from the Google Play Store, b) by using the catalogue created by Stachl et al. which we provide in RAPIDS ( data/external/stachl_application_genre_catalogue.csv ), or c) by manually creating a personalized catalogue. You can choose a, b or c by modifying [APPLICATION_GENRES] keys and values (see the Sensor parameters description table above).","title":"Phone Applications Foreground"},{"location":"features/phone-applications-foreground/#phone-applications-foreground","text":"Sensor parameters description for [PHONE_APPLICATIONS_FOREGROUND] (these parameters are used by the only provider available at the moment, RAPIDS): Key Description [TABLE] Database table where the applications foreground data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scraped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column.
By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] If [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] ; if [CATALOGUE_SOURCE] is equal to GOOGLE , all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored)","title":"Phone Applications Foreground"},{"location":"features/phone-applications-foreground/#rapids-provider","text":"The app category (genre) catalogue used in these features was originally created by Stachl et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_applications_foreground_raw.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime_with_categories.csv - data/interim/ { pid } /phone_applications_foreground_features/phone_applications_foreground_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_applications_foreground.csv Parameters description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_APPLICATIONS_FOREGROUND features from the RAPIDS provider [FEATURES] Features to be computed, see table below [SINGLE_CATEGORIES] An array of app categories to be included in the feature extraction computation. The special keyword all represents a category with all the apps from each participant. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [MULTIPLE_CATEGORIES] An array of collections representing meta-categories (a group of categories). The key of each element is the name of the meta-category and the value is an array of member app categories. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [SINGLE_APPS] An array of apps to be included in the feature extraction computation. Use their package name (e.g. com.google.android.youtube ) or the reserved keyword top1global (the most used app by a participant over the whole monitoring study) [EXCLUDED_CATEGORIES] An array of app categories to be excluded from the feature extraction computation. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [EXCLUDED_APPS] An array of apps to be excluded from the feature extraction computation. Use their package name, for example: com.google.android.youtube Features description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Feature Units Description count apps Number of times a single app or apps within a category were used (i.e.
they were brought to the foreground either by tapping their icon or switching to it from another app) timeoffirstuse minutes The time in minutes between 12:00am (midnight) and the first use of a single app or apps within a category during a time_segment timeoflastuse minutes The time in minutes between 12:00am (midnight) and the last use of a single app or apps within a category during a time_segment frequencyentropy nats The entropy of the used apps within a category during a time_segment (each app is seen as a unique event, the more apps were used, the higher the entropy). This is especially relevant when computed over all apps. Entropy cannot be obtained for a single app Assumptions/Observations Features can be computed by app, by apps grouped under a single category (genre) and by multiple categories grouped together (meta-categories). For example, we can get features for Facebook (single app), for Social Network apps (a category including Facebook and other social media apps) or for Social (a meta-category formed by Social Network and Social Media Tools categories). Apps installed by default like YouTube are considered system apps on some phones. We do an exact match to exclude apps where \u201cgenre\u201d == EXCLUDED_CATEGORIES or \u201cpackage_name\u201d == EXCLUDED_APPS . We provide three ways of classifying an app within a category (genre): a) by automatically scraping its official category from the Google Play Store, b) by using the catalogue created by Stachl et al. which we provide in RAPIDS ( data/external/stachl_application_genre_catalogue.csv ), or c) by manually creating a personalized catalogue. You can choose a, b or c by modifying [APPLICATION_GENRES] keys and values (see the Sensor parameters description table above).","title":"RAPIDS provider"},{"location":"features/phone-battery/","text":"Phone Battery \u00b6 Sensor parameters description for [PHONE_BATTERY] : Key Description [TABLE] Database table where the battery data is stored [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same battery charge or discharge episode RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_battery_raw.csv - data/interim/ { pid } /phone_battery_episodes.csv - data/interim/ { pid } /phone_battery_episodes_resampled.csv - data/interim/ { pid } /phone_battery_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_battery_features/phone_battery_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_battery.csv Parameters description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BATTERY features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Feature Units Description countdischarge episodes Number of discharging episodes. sumdurationdischarge minutes The total duration of all discharging episodes. countcharge episodes Number of battery charging episodes. sumdurationcharge minutes The total duration of all charging episodes. avgconsumptionrate episodes/minutes The average of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration maxconsumptionrate episodes/minutes The highest of all episodes\u2019 consumption rates.
An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration Assumptions/Observations We convert battery data collected with iOS client v1 (autodetected because battery status 4 does not exist) to match Android battery format: we swap status 3 for 5 and 1 for 3 We group battery data into discharge or charge episodes considering any contiguous rows with consecutive reductions or increases of the battery level if they are logged within [EPISODE_THRESHOLD_BETWEEN_ROWS] minutes from each other.","title":"Phone Battery"},{"location":"features/phone-battery/#phone-battery","text":"Sensor parameters description for [PHONE_BATTERY] : Key Description [TABLE] Database table where the battery data is stored [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same battery charge or discharge episode","title":"Phone Battery"},{"location":"features/phone-battery/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_battery_raw.csv - data/interim/ { pid } /phone_battery_episodes.csv - data/interim/ { pid } /phone_battery_episodes_resampled.csv - data/interim/ { pid } /phone_battery_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_battery_features/phone_battery_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_battery.csv Parameters description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BATTERY features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Feature Units Description countdischarge episodes Number of discharging episodes. sumdurationdischarge minutes The total duration of all discharging episodes. countcharge episodes Number of battery charging episodes. sumdurationcharge minutes The total duration of all charging episodes. avgconsumptionrate episodes/minutes The average of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration maxconsumptionrate episodes/minutes The highest of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration Assumptions/Observations We convert battery data collected with iOS client v1 (autodetected because battery status 4 does not exist) to match Android battery format: we swap status 3 for 5 and 1 for 3 We group battery data into discharge or charge episodes considering any contiguous rows with consecutive reductions or increases of the battery level if they are logged within [EPISODE_THRESHOLD_BETWEEN_ROWS] minutes from each other.","title":"RAPIDS provider"},{"location":"features/phone-bluetooth/","text":"Phone Bluetooth \u00b6 Sensor parameters description for [PHONE_BLUETOOTH] : Key Description [TABLE] Database table where the bluetooth data is stored RAPIDS provider \u00b6 Warning The features of this provider are deprecated in favor of DORYAB provider (see below).
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned devices during a time segment, a device can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique devices during a time segment as identified by their hardware ( bt_address ) address countscansmostuniquedevice scans Number of scans of the most sensed device within each time segment instance Assumptions/Observations From v0.2.0 countscans , uniquedevices , countscansmostuniquedevice were deprecated because they overlap with the respective features for ALL devices of the PHONE_BLUETOOTH DORYAB provider DORYAB provider \u00b6 This provider is adapted from the work by Doryab et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the DORYAB provider [FEATURES] Features to be computed, see table below. These features are computed for three device categories: all devices, own devices and other devices. Features description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Feature Units Description countscans scans Number of scans (rows) from the devices sensed during a time segment instance. 
The more scans a bluetooth device has the longer it remained within range of the participant\u2019s phone uniquedevices devices Number of unique bluetooth devices sensed during a time segment instance as identified by their hardware addresses ( bt_address ) meanscans scans Mean of the scans of every sensed device within each time segment instance stdscans scans Standard deviation of the scans of every sensed device within each time segment instance countscans most frequentdevice within segments scans Number of scans of the most sensed device within each time segment instance countscans least frequentdevice within segments scans Number of scans of the least sensed device within each time segment instance countscans most frequentdevice across segments scans Number of scans of the most sensed device across time segment instances of the same type countscans least frequentdevice across segments scans Number of scans of the least sensed device across time segment instances of the same type per device countscans most frequentdevice acrossdataset scans Number of scans of the most sensed device across the entire dataset of every participant countscans least frequentdevice acrossdataset scans Number of scans of the least sensed device across the entire dataset of every participant Assumptions/Observations Devices are classified as belonging to the participant ( own ) or to other people ( others ) using k-means based on the number of times and the number of days each device was detected across each participant\u2019s dataset. See Doryab et al for more details. If ownership cannot be computed because all devices were detected on only one day, they are all considered as other . Thus all and other features will be equal. The likelihood of this scenario decreases the more days of data you have. The most and least frequent devices will be the same across time segment instances and across the entire dataset when every time segment instance covers every hour of a dataset. For example, daily segments (00:00 to 23:59) fall in this category but morning segments (06:00am to 11:59am) or periodic 30-minute segments don\u2019t. Example Simplified raw bluetooth data The following is a simplified example with bluetooth data from three days and two time segments: morning and afternoon. There are two own devices: 5C836F5-487E-405F-8E28-21DBD40FA4FF detected seven times across two days and 499A1EAF-DDF1-4657-986C-EA5032104448 detected eight times on a single day. 
local_date segment bt_address own_device 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-29 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 afternoon 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-30 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2017-05-07 morning 5C5A9C41-2F68-4CEB-96D0-77DE3729B729 0 2017-05-07 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2017-05-07 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 morning 6C444841-FE64-4375-BC3F-FA410CDC0AC7 0 2017-05-07 morning 4DC7A22D-9F1F-4DEF-8576-086910AABCB5 0 2017-05-07 afternoon 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 The most and least frequent OTHER devices ( own_device == 0 ) during morning segments The most and least frequent ALL | OWN | OTHER devices are computed within each time segment instance, across time segment instances of the same type and across the entire dataset of each person. These are the most and least frequent devices for OTHER devices during morning segments. 
most frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) least frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) most frequent device across 2016-11-30 morning: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent device across 2016-11-30 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) most frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) least frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) most frequent across morning segments: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent across morning segments: '6C444841-FE64-4375-BC3F-FA410CDC0AC7' (when tied, the first occurrence is chosen) most frequent across dataset: '499A1EAF-DDF1-4657-986C-EA5032104448' (only taking into account \"morning\" segments) least frequent across dataset: '4DC7A22D-9F1F-4DEF-8576-086910AABCB5' (when tied, the first occurrence is chosen) Bluetooth features for OTHER devices and morning segments For brevity we only show the following features for morning segments: OTHER : DEVICES : [ \"countscans\" , \"uniquedevices\" , \"meanscans\" , \"stdscans\" ] SCANS_MOST_FREQUENT_DEVICE : [ \"withinsegments\" , \"acrosssegments\" , \"acrossdataset\" ] Note that countscansmostfrequentdeviceacrossdatasetothers is all 0 s because 499A1EAF-DDF1-4657-986C-EA5032104448 is excluded from the count as it is labelled as an own device (not other ). local_segment countscansothers uniquedevicesothers meanscansothers stdscansothers countscansmostfrequentdevicewithinsegmentsothers countscansmostfrequentdeviceacrosssegmentsothers countscansmostfrequentdeviceacrossdatasetothers 2016-11-29-morning 1 1 1.000000 NaN 1 0.0 0.0 2016-11-30-morning 4 3 1.333333 0.57735 2 2.0 2.0 2017-05-07-morning 5 5 1.000000 0.00000 1 1.0 1.0","title":"Phone Bluetooth"},{"location":"features/phone-bluetooth/#phone-bluetooth","text":"Sensor parameters description for [PHONE_BLUETOOTH] : Key Description [TABLE] Database table where the bluetooth data is stored","title":"Phone Bluetooth"},{"location":"features/phone-bluetooth/#rapids-provider","text":"Warning The features of this provider are deprecated in favor of DORYAB provider (see below).
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned devices during a time segment, a device can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique devices during a time segment as identified by their hardware ( bt_address ) address countscansmostuniquedevice scans Number of scans of the most sensed device within each time segment instance Assumptions/Observations From v0.2.0 countscans , uniquedevices , countscansmostuniquedevice were deprecated because they overlap with the respective features for ALL devices of the PHONE_BLUETOOTH DORYAB provider","title":"RAPIDS provider"},{"location":"features/phone-bluetooth/#doryab-provider","text":"This provider is adapted from the work by Doryab et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the DORYAB provider [FEATURES] Features to be computed, see table below. These features are computed for three device categories: all devices, own devices and other devices. Features description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Feature Units Description countscans scans Number of scans (rows) from the devices sensed during a time segment instance. 
The more scans a bluetooth device has the longer it remained within range of the participant\u2019s phone uniquedevices devices Number of unique bluetooth devices sensed during a time segment instance as identified by their hardware addresses ( bt_address ) meanscans scans Mean of the scans of every sensed device within each time segment instance stdscans scans Standard deviation of the scans of every sensed device within each time segment instance countscans most frequentdevice within segments scans Number of scans of the most sensed device within each time segment instance countscans least frequentdevice within segments scans Number of scans of the least sensed device within each time segment instance countscans most frequentdevice across segments scans Number of scans of the most sensed device across time segment instances of the same type countscans least frequentdevice across segments scans Number of scans of the least sensed device across time segment instances of the same type per device countscans most frequentdevice acrossdataset scans Number of scans of the most sensed device across the entire dataset of every participant countscans least frequentdevice acrossdataset scans Number of scans of the least sensed device across the entire dataset of every participant Assumptions/Observations Devices are classified as belonging to the participant ( own ) or to other people ( others ) using k-means based on the number of times and the number of days each device was detected across each participant\u2019s dataset. See Doryab et al for more details. If ownership cannot be computed because all devices were detected on only one day, they are all considered as other . Thus all and other features will be equal. The likelihood of this scenario decreases the more days of data you have. The most and least frequent devices will be the same across time segment instances and across the entire dataset when every time segment instance covers every hour of a dataset. For example, daily segments (00:00 to 23:59) fall in this category but morning segments (06:00am to 11:59am) or periodic 30-minute segments don\u2019t. Example Simplified raw bluetooth data The following is a simplified example with bluetooth data from three days and two time segments: morning and afternoon. There are two own devices: 5C836F5-487E-405F-8E28-21DBD40FA4FF detected seven times across two days and 499A1EAF-DDF1-4657-986C-EA5032104448 detected eight times on a single day. 
local_date segment bt_address own_device 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-29 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 afternoon 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-30 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2017-05-07 morning 5C5A9C41-2F68-4CEB-96D0-77DE3729B729 0 2017-05-07 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2017-05-07 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 morning 6C444841-FE64-4375-BC3F-FA410CDC0AC7 0 2017-05-07 morning 4DC7A22D-9F1F-4DEF-8576-086910AABCB5 0 2017-05-07 afternoon 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 The most and least frequent OTHER devices ( own_device == 0 ) during morning segments The most and least frequent ALL | OWN | OTHER devices are computed within each time segment instance, across time segment instances of the same type and across the entire dataset of each person. These are the most and least frequent devices for OTHER devices during morning segments. 
most frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) least frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) most frequent device across 2016-11-30 morning: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent device across 2016-11-30 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) most frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) least frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) most frequent across morning segments: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent across morning segments: '6C444841-FE64-4375-BC3F-FA410CDC0AC7' (when tied, the first occurance is chosen) most frequent across dataset: '499A1EAF-DDF1-4657-986C-EA5032104448' (only taking into account \"morning\" segments) least frequent across dataset: '4DC7A22D-9F1F-4DEF-8576-086910AABCB5' (when tied, the first occurance is chosen) Bluetooth features for OTHER devices and morning segments For brevity we only show the following features for morning segments: OTHER : DEVICES : [ \"countscans\" , \"uniquedevices\" , \"meanscans\" , \"stdscans\" ] SCANS_MOST_FREQUENT_DEVICE : [ \"withinsegments\" , \"acrosssegments\" , \"acrossdataset\" ] Note that countscansmostfrequentdeviceacrossdatasetothers is all 0 s because 499A1EAF-DDF1-4657-986C-EA5032104448 is excluded from the count as is labelled as an own device (not other ). local_segment countscansothers uniquedevicesothers meanscansothers stdscansothers countscansmostfrequentdevicewithinsegmentsothers countscansmostfrequentdeviceacrosssegmentsothers countscansmostfrequentdeviceacrossdatasetothers 2016-11-29-morning 1 1 1.000000 NaN 1 0.0 0.0 2016-11-30-morning 4 3 1.333333 0.57735 2 2.0 2.0 2017-05-07-morning 5 5 1.000000 0.00000 1 1.0 1.0","title":"DORYAB provider"},{"location":"features/phone-calls/","text":"Phone Calls \u00b6 Sensor parameters description for [PHONE_CALLS] : Key Description [TABLE] Database table where the calls data is stored RAPIDS Provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_calls_raw.csv - data/raw/ { pid } /phone_calls_with_datetime.csv - data/raw/ { pid } /phone_calls_with_datetime_unified.csv - data/interim/ { pid } /phone_calls_features/phone_calls_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_calls.csv Parameters description for [PHONE_CALLS][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CALLS features from the RAPIDS provider [CALL_TYPES] The particular call_type that will be analyzed. The options for this parameter are incoming, outgoing or missed. [FEATURES] Features to be computed for outgoing , incoming , and missed calls. Note that the same features are available for both incoming and outgoing calls, while missed calls has its own set of features. See the tables below. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] incoming and outgoing calls: Feature Units Description count calls Number of calls of a particular call_type occurred during a particular time_segment . 
distinctcontacts contacts Number of distinct contacts that are associated with a particular call_type for a particular time_segment . meanduration seconds The mean duration of all calls of a particular call_type during a particular time_segment . sumduration seconds The sum of the duration of all calls of a particular call_type during a particular time_segment . minduration seconds The duration of the shortest call of a particular call_type during a particular time_segment . maxduration seconds The duration of the longest call of a particular call_type during a particular time_segment . stdduration seconds The standard deviation of the duration of all the calls of a particular call_type during a particular time_segment . modeduration seconds The mode of the duration of all the calls of a particular call_type during a particular time_segment . entropyduration nats The estimate of the Shannon entropy for the duration of all the calls of a particular call_type during a particular time_segment . timefirstcall minutes The time in minutes between 12:00am (midnight) and the first call of call_type . timelastcall minutes The time in minutes between 12:00am (midnight) and the last call of call_type . countmostfrequentcontact calls The number of calls of a particular call_type during a particular time_segment of the most frequent contact throughout the monitored period. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] missed calls: Feature Units Description count calls Number of missed calls that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with missed calls for a particular time_segment . timefirstcall minutes The time in minutes from 12:00am (midnight) that the first missed call occurred. timelastcall minutes The time in minutes from 12:00am (midnight) that the last missed call occurred. countmostfrequentcontact calls The number of missed calls during a particular time_segment of the most frequent contact throughout the monitored period. Assumptions/Observations Traces for iOS calls are unique even for the same contact calling a participant more than once, which renders countmostfrequentcontact meaningless and distinctcontacts equal to the total number of traces. [CALL_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [CALL_TYPES] outgoing matches the [FEATURES] key outgoing iOS calls data is transformed to match Android calls data format. See our algorithm","title":"Phone Calls"},{"location":"features/phone-calls/#phone-calls","text":"Sensor parameters description for [PHONE_CALLS] : Key Description [TABLE] Database table where the calls data is stored","title":"Phone Calls"},{"location":"features/phone-calls/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_calls_raw.csv - data/raw/ { pid } /phone_calls_with_datetime.csv - data/raw/ { pid } /phone_calls_with_datetime_unified.csv - data/interim/ { pid } /phone_calls_features/phone_calls_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_calls.csv Parameters description for [PHONE_CALLS][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CALLS features from the RAPIDS provider [CALL_TYPES] The particular call_type that will be analyzed. The options for this parameter are incoming, outgoing or missed. [FEATURES] Features to be computed for outgoing , incoming , and missed calls.
Note that the same features are available for both incoming and outgoing calls, while missed calls have their own set of features. See the tables below. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] incoming and outgoing calls: Feature Units Description count calls Number of calls of a particular call_type that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular call_type for a particular time_segment . meanduration seconds The mean duration of all calls of a particular call_type during a particular time_segment . sumduration seconds The sum of the duration of all calls of a particular call_type during a particular time_segment . minduration seconds The duration of the shortest call of a particular call_type during a particular time_segment . maxduration seconds The duration of the longest call of a particular call_type during a particular time_segment . stdduration seconds The standard deviation of the duration of all the calls of a particular call_type during a particular time_segment . modeduration seconds The mode of the duration of all the calls of a particular call_type during a particular time_segment . entropyduration nats The estimate of the Shannon entropy for the duration of all the calls of a particular call_type during a particular time_segment . timefirstcall minutes The time in minutes between 12:00am (midnight) and the first call of call_type . timelastcall minutes The time in minutes between 12:00am (midnight) and the last call of call_type . countmostfrequentcontact calls The number of calls of a particular call_type during a particular time_segment of the most frequent contact throughout the monitored period. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] missed calls: Feature Units Description count calls Number of missed calls that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with missed calls for a particular time_segment . timefirstcall minutes The time in minutes from 12:00am (midnight) that the first missed call occurred. timelastcall minutes The time in minutes from 12:00am (midnight) that the last missed call occurred. countmostfrequentcontact calls The number of missed calls during a particular time_segment of the most frequent contact throughout the monitored period. Assumptions/Observations Traces for iOS calls are unique even for the same contact calling a participant more than once, which renders countmostfrequentcontact meaningless and distinctcontacts equal to the total number of traces. [CALL_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [CALL_TYPES] outgoing matches the [FEATURES] key outgoing iOS calls data is transformed to match Android calls data format.
See our algorithm","title":"RAPIDS Provider"},{"location":"features/phone-conversation/","text":"Phone Conversation \u00b6 Sensor parameters description for [PHONE_CONVERSATION] : Key Description [TABLE][ANDROID] Database table where the conversation data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [TABLE][IOS] Database table where the conversation data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_conversation_raw.csv - data/raw/ { pid } /phone_conversation_with_datetime.csv - data/raw/ { pid } /phone_conversation_with_datetime_unified.csv - data/interim/ { pid } /phone_conversation_features/phone_conversation_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_conversation.csv Parameters description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CONVERSATION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [RECORDING_MINUTES] Minutes the plugin was recording audio (default 1 min) [PAUSED_MINUTES] Minutes the plugin was NOT recording audio (default 3 min) Features description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Feature Units Description minutessilence minutes Minutes labeled as silence minutesnoise minutes Minutes labeled as noise minutesvoice minutes Minutes labeled as voice minutesunknown minutes Minutes labeled as unknown sumconversationduration minutes Total duration of all conversations maxconversationduration minutes Longest duration of all conversations minconversationduration minutes Shortest duration of all conversations avgconversationduration minutes Average duration of all conversations sdconversationduration minutes Standard Deviation of the duration of all conversations timefirstconversation minutes Minutes since midnight when the first conversation for a time segment was detected timelastconversation minutes Minutes since midnight when the last conversation for a time segment was detected noisesumenergy L2-norm Sum of all energy values when inference is noise noiseavgenergy L2-norm Average of all energy values when inference is noise noisesdenergy L2-norm Standard Deviation of all energy values when inference is noise noiseminenergy L2-norm Minimum of all energy values when inference is noise noisemaxenergy L2-norm Maximum of all energy values when inference is noise voicesumenergy L2-norm Sum of all energy values when inference is voice voiceavgenergy L2-norm Average of all energy values when inference is voice voicesdenergy L2-norm Standard Deviation of all energy values when inference is voice voiceminenergy L2-norm Minimum of all energy values when inference is voice voicemaxenergy L2-norm Maximum of all energy values when inference is voice silencesensedfraction - Ratio between minutessilence and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) noisesensedfraction - Ratio between minutesnoise and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) voicesensedfraction - Ratio between minutesvoice and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) unknownsensedfraction - Ratio between minutesunknown and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) silenceexpectedfraction - 
Ratio between minutessilence and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) noiseexpectedfraction - Ratio between minutesnoise and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) voiceexpectedfraction - Ratio between minutesvoice and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) unknownexpectedfraction - Ratio between minutesunknown and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) Assumptions/Observations The timestamp of conversation rows in iOS is in seconds, so we convert it to milliseconds to match Android\u2019s format","title":"Phone Conversation"},{"location":"features/phone-conversation/#phone-conversation","text":"Sensor parameters description for [PHONE_CONVERSATION] : Key Description [TABLE][ANDROID] Database table where the conversation data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [TABLE][IOS] Database table where the conversation data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS)","title":"Phone Conversation"},{"location":"features/phone-conversation/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_conversation_raw.csv - data/raw/ { pid } /phone_conversation_with_datetime.csv - data/raw/ { pid } /phone_conversation_with_datetime_unified.csv - data/interim/ { pid } /phone_conversation_features/phone_conversation_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_conversation.csv Parameters description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CONVERSATION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [RECORDING_MINUTES] Minutes the plugin was recording audio (default 1 min) [PAUSED_MINUTES] Minutes the plugin was NOT recording audio (default 3 min) Features description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Feature Units Description minutessilence minutes Minutes labeled as silence minutesnoise minutes Minutes labeled as noise minutesvoice minutes Minutes labeled as voice minutesunknown minutes Minutes labeled as unknown sumconversationduration minutes Total duration of all conversations maxconversationduration minutes Longest duration of all conversations minconversationduration minutes Shortest duration of all conversations avgconversationduration minutes Average duration of all conversations sdconversationduration minutes Standard Deviation of the duration of all conversations timefirstconversation minutes Minutes since midnight when the first conversation for a time segment was detected timelastconversation minutes Minutes since midnight when the last conversation for a time segment was detected noisesumenergy L2-norm Sum of all energy values when inference is noise noiseavgenergy L2-norm Average of all energy values when inference is noise noisesdenergy L2-norm Standard Deviation of all energy values when inference is noise noiseminenergy L2-norm Minimum of all energy values when inference
is noise noisemaxenergy L2-norm Maximum of all energy values when inference is noise voicesumenergy L2-norm Sum of all energy values when inference is voice voiceavgenergy L2-norm Average of all energy values when inference is voice voicesdenergy L2-norm Standard Deviation of all energy values when inference is voice voiceminenergy L2-norm Minimum of all energy values when inference is voice voicemaxenergy L2-norm Maximum of all energy values when inference is voice silencesensedfraction - Ratio between minutessilence and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) noisesensedfraction - Ratio between minutesnoise and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) voicesensedfraction - Ratio between minutesvoice and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) unknownsensedfraction - Ratio between minutesunknown and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) silenceexpectedfraction - Ratio between minutessilence and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) noiseexpectedfraction - Ratio between minutesnoise and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) voiceexpectedfraction - Ratio between minutesvoice and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) unknownexpectedfraction - Ratio between minutesunknown and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) Assumptions/Observations The timestamp of conversation rows in iOS is in seconds, so we convert it to milliseconds to match Android\u2019s format","title":"RAPIDS provider"},{"location":"features/phone-data-yield/","text":"Phone Data Yield \u00b6 This is a combinatorial sensor, which means that we use the data from multiple sensors to extract data yield features. Data yield features can be used to remove rows ( time segments ) that do not contain enough data. You should decide what your \u201cenough\u201d threshold is depending on the type of sensors you collected (frequency vs event based, e.g. accelerometer vs calls), the length of your study, and the rates of missing data that your analysis could handle. Why is data yield important? Imagine that you want to extract PHONE_CALL features on daily segments ( 00:00 to 23:59 ). Let\u2019s say that on day 1 the phone logged 10 calls and 23 hours of data from other sensors and on day 2 the phone logged 10 calls and only 2 hours of data from other sensors. It\u2019s more likely that other calls were placed on the 22 hours of data that you didn\u2019t log on day 2 than on the 1 hour of data you didn\u2019t log on day 1, and so including day 2 in your analysis could bias your results. Sensor parameters description for [PHONE_DATA_YIELD] : Key Description [SENSORS] One or more phone sensor config keys (e.g. PHONE_MESSAGE ). The more keys you include, the more accurately RAPIDS can approximate the time a smartphone was sensing data. The supported phone sensors you can include in this list are outlined below ( do NOT include Fitbit sensors ).
Supported phone sensors for [PHONE_DATA_YIELD][SENSORS] PHONE_ACCELEROMETER PHONE_ACTIVITY_RECOGNITION PHONE_APPLICATIONS_FOREGROUND PHONE_BATTERY PHONE_BLUETOOTH PHONE_CALLS PHONE_CONVERSATION PHONE_MESSAGES PHONE_LIGHT PHONE_LOCATIONS PHONE_SCREEN PHONE_WIFI_VISIBLE PHONE_WIFI_CONNECTED RAPIDS provider \u00b6 Before explaining the data yield features, let\u2019s define the following relevant concepts: A valid minute is any 60-second window when any phone sensor logged at least 1 row of data A valid hour is any 60-minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The timestamps of all sensors are concatenated and then grouped per time segment. Minute and hour windows are created from the beginning of each time segment instance and these windows are marked as valid based on the definitions above. The duration of each time segment is taken into account to compute the features described below. Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } / { sensor } _raw.csv # one for every [PHONE_DATA_YIELD][SENSORS] - data/interim/ { pid } /phone_yielded_timestamps.csv - data/interim/ { pid } /phone_yielded_timestamps_with_datetime.csv - data/interim/ { pid } /phone_data_yield_features/phone_data_yield_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_data_yield.csv Parameters description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_DATA_YIELD features from the RAPIDS provider [FEATURES] Features to be computed, see table below [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The proportion [0.0, 1.0] of valid minutes in a 60-minute window necessary to flag that window as valid. Features description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Feature Units Description ratiovalidyieldedminutes - The ratio between the number of valid minutes and the duration in minutes of a time segment. ratiovalidyieldedhours - The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour this feature will always be 1. Assumptions/Observations We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together. For example, let\u2019s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur: (a) the 12 missing hours are at the beginning of the segment, or (b) 30 minutes are missing from every hour (24 * 30 minutes = 12 hours). ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b ). In other words, sensed data for b is more evenly spread compared to a .","title":"Phone Data Yield"},{"location":"features/phone-data-yield/#phone-data-yield","text":"This is a combinatorial sensor, which means that we use the data from multiple sensors to extract data yield features. Data yield features can be used to remove rows ( time segments ) that do not contain enough data.
You should decide what your \u201cenough\u201d threshold is depending on the type of sensors you collected (frequency vs event based, e.g. accelerometer vs calls), the length of your study, and the rates of missing data that your analysis could handle. Why is data yield important? Imagine that you want to extract PHONE_CALL features on daily segments ( 00:00 to 23:59 ). Let\u2019s say that on day 1 the phone logged 10 calls and 23 hours of data from other sensors and on day 2 the phone logged 10 calls and only 2 hours of data from other sensors. It\u2019s more likely that other calls were placed on the 22 hours of data that you didn\u2019t log on day 2 than on the 1 hour of data you didn\u2019t log on day 1, and so including day 2 in your analysis could bias your results. Sensor parameters description for [PHONE_DATA_YIELD] : Key Description [SENSORS] One or more phone sensor config keys (e.g. PHONE_MESSAGE ). The more keys you include, the more accurately RAPIDS can approximate the time a smartphone was sensing data. The supported phone sensors you can include in this list are outlined below ( do NOT include Fitbit sensors ). Supported phone sensors for [PHONE_DATA_YIELD][SENSORS] PHONE_ACCELEROMETER PHONE_ACTIVITY_RECOGNITION PHONE_APPLICATIONS_FOREGROUND PHONE_BATTERY PHONE_BLUETOOTH PHONE_CALLS PHONE_CONVERSATION PHONE_MESSAGES PHONE_LIGHT PHONE_LOCATIONS PHONE_SCREEN PHONE_WIFI_VISIBLE PHONE_WIFI_CONNECTED","title":"Phone Data Yield"},{"location":"features/phone-data-yield/#rapids-provider","text":"Before explaining the data yield features, let\u2019s define the following relevant concepts: A valid minute is any 60-second window when any phone sensor logged at least 1 row of data A valid hour is any 60-minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The timestamps of all sensors are concatenated and then grouped per time segment. Minute and hour windows are created from the beginning of each time segment instance and these windows are marked as valid based on the definitions above. The duration of each time segment is taken into account to compute the features described below. Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } / { sensor } _raw.csv # one for every [PHONE_DATA_YIELD][SENSORS] - data/interim/ { pid } /phone_yielded_timestamps.csv - data/interim/ { pid } /phone_yielded_timestamps_with_datetime.csv - data/interim/ { pid } /phone_data_yield_features/phone_data_yield_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_data_yield.csv Parameters description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_DATA_YIELD features from the RAPIDS provider [FEATURES] Features to be computed, see table below [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The proportion [0.0, 1.0] of valid minutes in a 60-minute window necessary to flag that window as valid. Features description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Feature Units Description ratiovalidyieldedminutes - The ratio between the number of valid minutes and the duration in minutes of a time segment. ratiovalidyieldedhours - The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour this feature will always be 1.
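To make the valid-minute and valid-hour definitions above concrete, here is a minimal, hypothetical Python sketch (not the RAPIDS source code) of how the two ratios could be derived for one time segment instance from the pooled timestamps of the sensors listed in [PHONE_DATA_YIELD][SENSORS]; the function name and arguments are illustrative only.

```python
import numpy as np
import pandas as pd

# Hypothetical helper, for illustration only. `sensor_timestamps` is a pandas
# Series of epoch milliseconds pooled from every sensor in
# [PHONE_DATA_YIELD][SENSORS]; the segment bounds are also epoch milliseconds.
def data_yield_ratios(sensor_timestamps, segment_start, segment_end,
                      minute_ratio_threshold=0.5):
    ts = sensor_timestamps[(sensor_timestamps >= segment_start) &
                           (sensor_timestamps < segment_end)]
    duration_minutes = (segment_end - segment_start) / 60_000

    # Valid minute: a 60-second window (counted from the segment start)
    # containing at least one row from any sensor.
    minute_idx = np.unique((ts - segment_start) // 60_000)
    ratiovalidyieldedminutes = len(minute_idx) / duration_minutes

    # Valid hour: a 60-minute window with at least
    # MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS * 60 valid minutes.
    valid_minutes_per_hour = pd.Series(minute_idx // 60).value_counts()
    valid_hours = int((valid_minutes_per_hour >= minute_ratio_threshold * 60).sum())
    # RAPIDS reports 1 for segments shorter than one hour; here we simply cap
    # the ratio at 1.0 to keep the sketch short.
    ratiovalidyieldedhours = min(valid_hours / max(duration_minutes / 60, 1), 1.0)
    return ratiovalidyieldedminutes, ratiovalidyieldedhours
```

For the 24-hour example discussed in the Assumptions/Observations that follow, this sketch yields 0.5 for the minute ratio in both scenarios, while the hour ratio separates them depending on the chosen threshold.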
Assumptions/Observations We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together. For example, let\u2019s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur: (a) the 12 missing hours are at the beginning of the segment, or (b) 30 minutes are missing from every hour (24 * 30 minutes = 12 hours). ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b ). In other words, sensed data for b is more evenly spread compared to a .","title":"RAPIDS provider"},{"location":"features/phone-light/","text":"Phone Light \u00b6 Sensor parameters description for [PHONE_LIGHT] : Key Description [TABLE] Database table where the light data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_light_raw.csv - data/raw/ { pid } /phone_light_with_datetime.csv - data/interim/ { pid } /phone_light_features/phone_light_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_light.csv Parameters description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_LIGHT features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of light sensor rows recorded. maxlux lux The maximum ambient luminance. minlux lux The minimum ambient luminance. avglux lux The average ambient luminance. medianlux lux The median ambient luminance. stdlux lux The standard deviation of ambient luminance. Assumptions/Observations NA","title":"Phone Light"},{"location":"features/phone-light/#phone-light","text":"Sensor parameters description for [PHONE_LIGHT] : Key Description [TABLE] Database table where the light data is stored","title":"Phone Light"},{"location":"features/phone-light/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_light_raw.csv - data/raw/ { pid } /phone_light_with_datetime.csv - data/interim/ { pid } /phone_light_features/phone_light_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_light.csv Parameters description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_LIGHT features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of light sensor rows recorded. maxlux lux The maximum ambient luminance. minlux lux The minimum ambient luminance. avglux lux The average ambient luminance. medianlux lux The median ambient luminance. stdlux lux The standard deviation of ambient luminance.
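As an illustration only (not the RAPIDS implementation), the PHONE_LIGHT features above are plain descriptive statistics over the lux readings in each time segment; the dataframe and column names used below (`light`, `local_segment`, `double_light_lux`) are assumptions made for the sketch.

```python
import pandas as pd

# Hypothetical input: one row per light reading, already annotated with the
# time segment it falls into and the ambient luminance in lux.
def light_features(light: pd.DataFrame) -> pd.DataFrame:
    return (light.groupby("local_segment")["double_light_lux"]
                 .agg(count="count", maxlux="max", minlux="min",
                      avglux="mean", medianlux="median", stdlux="std")
                 .reset_index())
```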
Assumptions/Observations NA","title":"RAPIDS provider"},{"location":"features/phone-locations/","text":"Phone Locations \u00b6 Sensor parameters description for [PHONE_LOCATIONS] : Key Description [TABLE] Database table where the location data is stored [LOCATIONS_TO_USE] Type of location data to use, one of ALL , GPS or FUSED_RESAMPLED . This filter is based on the provider column of the AWARE locations table, ALL includes every row, GPS only includes rows where provider is gps, and FUSED_RESAMPLED only includes rows where provider is fused after being resampled. [FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD] if FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row will be resampled to the next valid timestamp (see the Assumptions/Observations below) only if the time difference between them is less than or equal to this threshold (in minutes). [FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION] if FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row will be resampled at most for this long (in minutes) Assumptions/Observations Types of location data to use AWARE Android and iOS clients can collect location coordinates through the phone's GPS, the network cellular towers around the phone, or Google's fused location API. If you want to use only the GPS provider, set [LOCATIONS_TO_USE] to GPS ; if you want to use all providers (not recommended due to the difference in accuracy), set [LOCATIONS_TO_USE] to ALL ; if your AWARE client was configured to use fused location only or you want to focus only on this provider, set [LOCATIONS_TO_USE] to FUSED_RESAMPLED . FUSED_RESAMPLED takes the original fused location coordinates and replicates each pair forward in time as long as the phone was sensing data, as indicated by the joined timestamps of [PHONE_DATA_YIELD][SENSORS] . This is done because Google's API only logs a new location coordinate pair when it is sufficiently different in time or space from the previous one. There are two parameters associated with resampling fused location. FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD (in minutes, default 30) controls the maximum gap between any two coordinate pairs to replicate the last known pair (for example, if participant A's phone did not collect data between 10:30am and 10:50am and between 11:05am and 11:40am, the last known coordinate pair will be replicated during the first period but not the second; in other words, we assume that we can no longer guarantee the participant stayed at the last known location if the phone did not sense data for more than 30 minutes). FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION (in minutes, default 720 or 12 hours) stops the last known fused location from being replicated longer than this threshold even if the phone was sensing data continuously (for example, participant A went home at 9pm and their phone was sensing data without gaps until 11am the next morning, the last known location will only be replicated until 9am). If you have suggestions to modify or improve this resampling, let us know. BARNETT provider \u00b6 These features are based on the original open-source implementation by Barnett et al and some features created by Canzian et al .
Available time segments and platforms Available only for segments that start at 00:00:00 and end at 23:59:59 of the same day (daily segments) Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the BARNETT provider [FEATURES] Features to be computed, see table below [ACCURACY_LIMIT] An integer in meters; any location rows with an accuracy higher than this will be dropped. This number means there\u2019s a 68% probability the true location is within this radius [TIMEZONE] Timezone where the location data was collected. By default points to the one defined in the Configuration [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. Features description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] adapted from Beiwe Summary Statistics : Feature Units Description hometime minutes Time at home. Time spent at home in minutes. Home is the most visited significant location between 8 pm and 8 am including any pauses within a 200-meter radius. disttravelled meters Total distance travelled over a day (flights). rog meters The Radius of Gyration (rog) is a measure in meters of the area covered by a person over a day. A centroid is calculated for all the places (pauses) visited during a day and a weighted distance between all the places and that centroid is computed. The weights are proportional to the time spent in each place. maxdiam meters The maximum diameter is the largest distance between any two pauses. maxhomedist meters The maximum distance from home in meters. siglocsvisited locations The number of significant locations visited during the day. Significant locations are computed using k-means clustering over pauses found in the whole monitoring period. The number of clusters is found by iterating k from 1 to 200, stopping when the centroids of two significant locations are within 400 meters of one another. avgflightlen meters Mean length of all flights. stdflightlen meters Standard deviation of the length of all flights. avgflightdur seconds Mean duration of all flights. stdflightdur seconds The standard deviation of the duration of all flights. probpause - The fraction of a day spent in a pause (as opposed to a flight). siglocentropy nats Shannon\u2019s entropy measurement based on the proportion of time spent at each significant location visited during a day. circdnrtn - A continuous metric quantifying a person\u2019s circadian routine that can take any value between 0 and 1, where 0 represents a daily routine completely different from any other sensed day and 1 a routine the same as every other sensed day. wkenddayrtn - Same as circdnrtn but computed separately for weekends and weekdays.
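To illustrate the rog feature described above, here is a simplified, hypothetical Python sketch; it is not Barnett et al.'s R implementation, and the input format (latitude, longitude, minutes spent per pause) is assumed for the example.

```python
import numpy as np

EARTH_RADIUS_M = 6_371_000

def haversine_m(lat1, lon1, lat2, lon2):
    """Great-circle distance in meters between points given in degrees."""
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    a = (np.sin((lat2 - lat1) / 2) ** 2 +
         np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    return 2 * EARTH_RADIUS_M * np.arcsin(np.sqrt(a))

def radius_of_gyration(pauses):
    """`pauses` is an (n, 3) array of [latitude, longitude, minutes_spent]."""
    lat, lon, minutes = pauses[:, 0], pauses[:, 1], pauses[:, 2]
    weights = minutes / minutes.sum()
    # Time-weighted centroid of all the places (pauses) visited during the day.
    centroid_lat, centroid_lon = np.sum(weights * lat), np.sum(weights * lon)
    distances = haversine_m(lat, lon, centroid_lat, centroid_lon)
    # Time-weighted root-mean-square distance to the centroid, in meters.
    return float(np.sqrt(np.sum(weights * distances ** 2)))
```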
Assumptions/Observations Barnett et al.'s features These features are based on a Pause-Flight model. A pause is defined as a mobility trace (location pings) within a certain duration and distance (by default 300 seconds and 60 meters). A flight is any mobility trace between two pauses. Data is resampled and imputed before the features are computed. See Barnett et al for more information. In RAPIDS we only expose two parameters for these features (timezone and accuracy limit). You can change other parameters in src/features/phone_locations/barnett/library/MobilityFeatures.R . Significant Locations Significant locations are determined using K-means clustering on pauses longer than 10 minutes. The number of clusters (K) is increased until no two clusters are within 400 meters from each other. After this, pauses within a certain range of a cluster (200 meters by default) will count as a visit to that significant location. This description was adapted from the Supplementary Materials of Barnett et al . The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al . DORYAB provider \u00b6 These features are based on the original implementation by Doryab et al. . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the DORYAB provider [FEATURES] Features to be computed, see table below [DBSCAN_EPS] The maximum distance in meters between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. [DBSCAN_MINSAMPLES] The number of samples (or total weight) in a neighborhood for a point to be considered as a core point of a cluster. This includes the point itself. [THRESHOLD_STATIC] The threshold value in km/hr which labels a row as Static or Moving. [MAXIMUM_GAP_ALLOWED] The maximum gap (in seconds) allowed between any two consecutive rows for them to be considered part of the same displacement. If this threshold is too high, it can throw speed and distance calculations off for periods when the phone was not sensing. [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. [SAMPLING_FREQUENCY] Expected time difference between any two location rows in minutes. If set to 0 , the sampling frequency will be inferred automatically as the median of all the differences between any two consecutive row timestamps (recommended if you are using FUSED_RESAMPLED data). This parameter impacts all the time calculations.
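The [DBSCAN_EPS] and [DBSCAN_MINSAMPLES] parameters above map directly onto scikit-learn's DBSCAN. The following hypothetical sketch (not the RAPIDS code) shows one way significant places could be identified by converting the epsilon from meters to radians so the haversine metric can be used; the column layout and default values are assumptions.

```python
import numpy as np
from sklearn.cluster import DBSCAN

EARTH_RADIUS_M = 6_371_000

def significant_places(coords_deg, dbscan_eps_m=10, dbscan_minsamples=5):
    """`coords_deg` is an (n, 2) array of [latitude, longitude] in degrees."""
    coords_rad = np.radians(coords_deg)
    labels = DBSCAN(eps=dbscan_eps_m / EARTH_RADIUS_M,
                    min_samples=dbscan_minsamples,
                    metric="haversine").fit(coords_rad).labels_
    # Label -1 marks rows that do not belong to any significant place.
    numberofsignificantplaces = len(set(labels) - {-1})
    outlierstimepercent = float(np.mean(labels == -1))
    return labels, numberofsignificantplaces, outlierstimepercent
```

Rows labeled -1 are DBSCAN outliers, which is what the outlierstimepercent feature in the table below is based on.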
Features description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Feature Units Description locationvariance \\(meters^2\\) The sum of the variances of the latitude and longitude columns. loglocationvariance - Log of the sum of the variances of the latitude and longitude columns. totaldistance meters Total distance travelled in a time segment using the haversine formula. averagespeed km/hr Average speed in a time segment considering only the instances labeled as Moving. varspeed km/hr Speed variance in a time segment considering only the instances labeled as Moving. circadianmovement - \"It encodes the extent to which a person\u2019s location patterns follow a 24-hour circadian cycle.\" Doryab et al. . numberofsignificantplaces places Number of significant locations visited. It is calculated using the DBSCAN clustering algorithm which takes in EPS and MIN_SAMPLES as parameters to identify clusters. Each cluster is a significant place. numberlocationtransitions transitions Number of movements between any two clusters in a time segment. radiusgyration meters Quantifies the area covered by a participant. timeattop1location minutes Time spent at the most significant location. timeattop2location minutes Time spent at the 2 nd most significant location. timeattop3location minutes Time spent at the 3 rd most significant location. movingtostaticratio - Ratio between the number of rows labeled Moving and the number of rows labeled Static. outlierstimepercent - Ratio between the number of rows that belong to non-significant clusters and the total number of rows in a time segment. maxlengthstayatclusters minutes Maximum time spent in a cluster (significant location). minlengthstayatclusters minutes Minimum time spent in a cluster (significant location). meanlengthstayatclusters minutes Average time spent in a cluster (significant location). stdlengthstayatclusters minutes Standard deviation of time spent in a cluster (significant location). locationentropy nats Shannon Entropy computed over the row count of each cluster (significant location); it will be higher the more rows belong to a cluster (i.e. the more time a participant spent at a significant location). normalizedlocationentropy nats Shannon Entropy computed over the row count of each cluster (significant location) divided by the number of clusters; it will be higher the more rows belong to a cluster (i.e. the more time a participant spent at a significant location). Assumptions/Observations Significant Locations Identified Significant locations are determined using DBSCAN clustering on locations that a participant visits over the course of the data collection period. The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al .","title":"Phone Locations"},{"location":"features/phone-locations/#phone-locations","text":"Sensor parameters description for [PHONE_LOCATIONS] : Key Description [TABLE] Database table where the location data is stored [LOCATIONS_TO_USE] Type of location data to use, one of ALL , GPS or FUSED_RESAMPLED . This filter is based on the provider column of the AWARE locations table, ALL includes every row, GPS only includes rows where provider is gps, and FUSED_RESAMPLED only includes rows where provider is fused after being resampled.
[FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD] if FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row will be resampled to the next valid timestamp (see the Assumptions/Observations below) only if the time difference between them is less than or equal to this threshold (in minutes). [FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION] if FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row will be resampled at most for this long (in minutes) Assumptions/Observations Types of location data to use AWARE Android and iOS clients can collect location coordinates through the phone's GPS, the network cellular towers around the phone, or Google's fused location API. If you want to use only the GPS provider, set [LOCATIONS_TO_USE] to GPS ; if you want to use all providers (not recommended due to the difference in accuracy), set [LOCATIONS_TO_USE] to ALL ; if your AWARE client was configured to use fused location only or you want to focus only on this provider, set [LOCATIONS_TO_USE] to FUSED_RESAMPLED . FUSED_RESAMPLED takes the original fused location coordinates and replicates each pair forward in time as long as the phone was sensing data, as indicated by the joined timestamps of [PHONE_DATA_YIELD][SENSORS] . This is done because Google's API only logs a new location coordinate pair when it is sufficiently different in time or space from the previous one. There are two parameters associated with resampling fused location. FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD (in minutes, default 30) controls the maximum gap between any two coordinate pairs to replicate the last known pair (for example, if participant A's phone did not collect data between 10:30am and 10:50am and between 11:05am and 11:40am, the last known coordinate pair will be replicated during the first period but not the second; in other words, we assume that we can no longer guarantee the participant stayed at the last known location if the phone did not sense data for more than 30 minutes). FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION (in minutes, default 720 or 12 hours) stops the last known fused location from being replicated longer than this threshold even if the phone was sensing data continuously (for example, participant A went home at 9pm and their phone was sensing data without gaps until 11am the next morning, the last known location will only be replicated until 9am). If you have suggestions to modify or improve this resampling, let us know.","title":"Phone Locations"},{"location":"features/phone-locations/#barnett-provider","text":"These features are based on the original open-source implementation by Barnett et al and some features created by Canzian et al . Available time segments and platforms Available only for segments that start at 00:00:00 and end at 23:59:59 of the same day (daily segments) Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the BARNETT provider [FEATURES] Features to be computed, see table below [ACCURACY_LIMIT] An integer in meters; any location rows with an accuracy higher than this will be dropped.
This number means there\u2019s a 68% probability the true location is within this radius [TIMEZONE] Timezone where the location data was collected. By default points to the one defined in the Configuration [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. Features description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] adapted from Beiwe Summary Statistics : Feature Units Description hometime minutes Time at home. Time spent at home in minutes. Home is the most visited significant location between 8 pm and 8 am including any pauses within a 200-meter radius. disttravelled meters Total distance travelled over a day (flights). rog meters The Radius of Gyration (rog) is a measure in meters of the area covered by a person over a day. A centroid is calculated for all the places (pauses) visited during a day and a weighted distance between all the places and that centroid is computed. The weights are proportional to the time spent in each place. maxdiam meters The maximum diameter is the largest distance between any two pauses. maxhomedist meters The maximum distance from home in meters. siglocsvisited locations The number of significant locations visited during the day. Significant locations are computed using k-means clustering over pauses found in the whole monitoring period. The number of clusters is found by iterating k from 1 to 200, stopping when the centroids of two significant locations are within 400 meters of one another. avgflightlen meters Mean length of all flights. stdflightlen meters Standard deviation of the length of all flights. avgflightdur seconds Mean duration of all flights. stdflightdur seconds The standard deviation of the duration of all flights. probpause - The fraction of a day spent in a pause (as opposed to a flight). siglocentropy nats Shannon\u2019s entropy measurement based on the proportion of time spent at each significant location visited during a day. circdnrtn - A continuous metric quantifying a person\u2019s circadian routine that can take any value between 0 and 1, where 0 represents a daily routine completely different from any other sensed day and 1 a routine the same as every other sensed day. wkenddayrtn - Same as circdnrtn but computed separately for weekends and weekdays. Assumptions/Observations Barnett et al.'s features These features are based on a Pause-Flight model. A pause is defined as a mobility trace (location pings) within a certain duration and distance (by default 300 seconds and 60 meters). A flight is any mobility trace between two pauses. Data is resampled and imputed before the features are computed. See Barnett et al for more information. In RAPIDS we only expose two parameters for these features (timezone and accuracy limit). You can change other parameters in src/features/phone_locations/barnett/library/MobilityFeatures.R . Significant Locations Significant locations are determined using K-means clustering on pauses longer than 10 minutes. The number of clusters (K) is increased until no two clusters are within 400 meters from each other. After this, pauses within a certain range of a cluster (200 meters by default) will count as a visit to that significant location.
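The iterative K-means procedure just described can be pictured with the following simplified, hypothetical Python sketch (the reference implementation is the R code in src/features/phone_locations/barnett/library/MobilityFeatures.R); it assumes pause coordinates already projected to meters and pauses shorter than 10 minutes already filtered out.

```python
import numpy as np
from scipy.spatial.distance import pdist
from sklearn.cluster import KMeans

def number_of_significant_locations(pause_xy, min_separation_m=400, max_k=200):
    """`pause_xy` is an (n, 2) array of pause coordinates in meters."""
    if len(pause_xy) == 0:
        return 0
    k = 1
    for k in range(1, min(max_k, len(pause_xy)) + 1):
        centroids = KMeans(n_clusters=k, n_init=10).fit(pause_xy).cluster_centers_
        if k > 1 and pdist(centroids).min() < min_separation_m:
            # Two centroids came within 400 m of each other: keep the previous K.
            return k - 1
    return k
```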
This description was adapted from the Supplementary Materials of Barnett et al . The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al .","title":"BARNETT provider"},{"location":"features/phone-locations/#doryab-provider","text":"These features are based on the original implementation by Doryab et al. . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the DORYAB provider [FEATURES] Features to be computed, see table below [DBSCAN_EPS] The maximum distance in meters between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. [DBSCAN_MINSAMPLES] The number of samples (or total weight) in a neighborhood for a point to be considered as a core point of a cluster. This includes the point itself. [THRESHOLD_STATIC] The threshold value in km/hr which labels a row as Static or Moving. [MAXIMUM_GAP_ALLOWED] The maximum gap (in seconds) allowed between any two consecutive rows for them to be considered part of the same displacement. If this threshold is too high, it can throw speed and distance calculations off for periods when the phone was not sensing. [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. [SAMPLING_FREQUENCY] Expected time difference between any two location rows in minutes. If set to 0 , the sampling frequency will be inferred automatically as the median of all the differences between any two consecutive row timestamps (recommended if you are using FUSED_RESAMPLED data). This parameter impacts all the time calculations. Features description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Feature Units Description locationvariance \\(meters^2\\) The sum of the variances of the latitude and longitude columns. loglocationvariance - Log of the sum of the variances of the latitude and longitude columns. totaldistance meters Total distance travelled in a time segment using the haversine formula. averagespeed km/hr Average speed in a time segment considering only the instances labeled as Moving. varspeed km/hr Speed variance in a time segment considering only the instances labeled as Moving. circadianmovement - \"It encodes the extent to which a person\u2019s location patterns follow a 24-hour circadian cycle.\" Doryab et al. . numberofsignificantplaces places Number of significant locations visited. It is calculated using the DBSCAN clustering algorithm which takes in EPS and MIN_SAMPLES as parameters to identify clusters.
Each cluster is a significant place. numberlocationtransitions transitions Number of movements between any two clusters in a time segment. radiusgyration meters Quantifies the area covered by a participant. timeattop1location minutes Time spent at the most significant location. timeattop2location minutes Time spent at the 2 nd most significant location. timeattop3location minutes Time spent at the 3 rd most significant location. movingtostaticratio - Ratio between the number of rows labeled Moving and the number of rows labeled Static. outlierstimepercent - Ratio between the number of rows that belong to non-significant clusters and the total number of rows in a time segment. maxlengthstayatclusters minutes Maximum time spent in a cluster (significant location). minlengthstayatclusters minutes Minimum time spent in a cluster (significant location). meanlengthstayatclusters minutes Average time spent in a cluster (significant location). stdlengthstayatclusters minutes Standard deviation of time spent in a cluster (significant location). locationentropy nats Shannon Entropy computed over the row count of each cluster (significant location); it will be higher the more rows belong to a cluster (i.e. the more time a participant spent at a significant location). normalizedlocationentropy nats Shannon Entropy computed over the row count of each cluster (significant location) divided by the number of clusters; it will be higher the more rows belong to a cluster (i.e. the more time a participant spent at a significant location). Assumptions/Observations Significant Locations Identified Significant locations are determined using DBSCAN clustering on locations that a participant visits over the course of the data collection period. The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al .","title":"DORYAB provider"},{"location":"features/phone-messages/","text":"Phone Messages \u00b6 Sensor parameters description for [PHONE_MESSAGES] : Key Description [TABLE] Database table where the messages data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_messages_raw.csv - data/raw/ { pid } /phone_messages_with_datetime.csv - data/interim/ { pid } /phone_messages_features/phone_messages_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_messages.csv Parameters description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_MESSAGES features from the RAPIDS provider [MESSAGES_TYPES] The messages_type that will be analyzed. The options for this parameter are received or sent . [FEATURES] Features to be computed, see table below for [MESSAGES_TYPES] received and sent Features description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Feature Units Description count messages Number of messages of type messages_type that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular messages_type during a particular time_segment . timefirstmessages minutes Number of minutes between 12:00am (midnight) and the first message of a particular messages_type during a particular time_segment . timelastmessages minutes Number of minutes between 12:00am (midnight) and the last message of a particular messages_type during a particular time_segment .
countmostfrequentcontact messages Number of messages from the contact with the most messages of messages_type during a time_segment throughout the whole dataset of each participant. Assumptions/Observations [MESSAGES_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [MESSAGES_TYPES] sent matches the [FEATURES] key sent","title":"Phone Messages"},{"location":"features/phone-messages/#phone-messages","text":"Sensor parameters description for [PHONE_MESSAGES] : Key Description [TABLE] Database table where the messages data is stored","title":"Phone Messages"},{"location":"features/phone-messages/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_messages_raw.csv - data/raw/ { pid } /phone_messages_with_datetime.csv - data/interim/ { pid } /phone_messages_features/phone_messages_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_messages.csv Parameters description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_MESSAGES features from the RAPIDS provider [MESSAGES_TYPES] The messages_type that will be analyzed. The options for this parameter are received or sent . [FEATURES] Features to be computed, see table below for [MESSAGES_TYPES] received and sent Features description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Feature Units Description count messages Number of messages of type messages_type that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular messages_type during a particular time_segment . timefirstmessages minutes Number of minutes between 12:00am (midnight) and the first message of a particular messages_type during a particular time_segment . timelastmessages minutes Number of minutes between 12:00am (midnight) and the last message of a particular messages_type during a particular time_segment . countmostfrequentcontact messages Number of messages from the contact with the most messages of messages_type during a time_segment throughout the whole dataset of each participant. Assumptions/Observations [MESSAGES_TYPES] and [FEATURES] keys in config.yaml need to match. 
For example, [MESSAGES_TYPES] sent matches the [FEATURES] key sent","title":"RAPIDS provider"},{"location":"features/phone-screen/","text":"Phone Screen \u00b6 Sensor parameters description for [PHONE_SCREEN] : Key Description [TABLE] Database table where the screen data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_screen_raw.csv - data/raw/ { pid } /phone_screen_with_datetime.csv - data/raw/ { pid } /phone_screen_with_datetime_unified.csv - data/interim/ { pid } /phone_screen_episodes.csv - data/interim/ { pid } /phone_screen_episodes_resampled.csv - data/interim/ { pid } /phone_screen_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_screen_features/phone_screen_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_screen.csv Parameters description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_SCREEN features from the RAPIDS provider [FEATURES] Features to be computed, see table below [REFERENCE_HOUR_FIRST_USE] The reference point from which firstuseafter is to be computed, default is midnight [IGNORE_EPISODES_SHORTER_THAN] Ignore episodes that are shorter than this threshold (minutes). Set to 0 to disable this filter. [IGNORE_EPISODES_LONGER_THAN] Ignore episodes that are longer than this threshold (minutes). Set to 0 to disable this filter. [EPISODE_TYPES] Currently we only support unlock episodes (from when the phone is unlocked until the screen is off) Features description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Feature Units Description sumduration minutes Total duration of all unlock episodes. maxduration minutes Longest duration of any unlock episode. minduration minutes Shortest duration of any unlock episode. avgduration minutes Average duration of all unlock episodes. stdduration minutes Standard deviation of the duration of all unlock episodes. countepisode episodes Number of all unlock episodes. firstuseafter minutes Minutes until the first unlock episode. Assumptions/Observations In Android, lock events can happen right after an off event, after a few seconds of an off event, or never happen depending on the phone's settings; therefore, an unlock episode is defined as the time between an unlock and an off event. In iOS, on and off events do not exist, so an unlock episode is defined as the time between an unlock and a lock event. Events in iOS are recorded reliably, albeit with some duplicated lock events within milliseconds of each other, so we only keep consecutive unlock/lock pairs. In Android you can find multiple consecutive unlock or lock events, so we only keep consecutive unlock/off pairs. In our experiments these cases are less than 10% of the screen events collected; this happens because ACTION_SCREEN_OFF and ACTION_SCREEN_ON are sent when the device becomes non-interactive, which may have nothing to do with the screen turning off . In addition to unlock/off episodes, in Android it is possible to measure the time spent on the lock screen before an unlock event as well as the total screen time (i.e. ON to OFF ) but these are not implemented at the moment. We transform iOS screen events to match Android\u2019s format by replacing lock episodes with off episodes (2 with 0) in iOS. 
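To make the unlock-to-off pairing concrete, here is an illustrative Python sketch (not the RAPIDS implementation); it assumes a DataFrame with timestamp in milliseconds and screen_status coded as 3 for unlock and 0 for off, matching the Android convention described above:

import pandas as pd

def unlock_episode_durations(events: pd.DataFrame) -> pd.Series:
    # events: screen rows with columns 'timestamp' (ms) and 'screen_status'
    durations, unlock_ts = [], None
    for _, row in events.sort_values("timestamp").iterrows():
        if row["screen_status"] == 3 and unlock_ts is None:        # start an episode at the first unlock
            unlock_ts = row["timestamp"]
        elif row["screen_status"] == 0 and unlock_ts is not None:  # close it at the next off event
            durations.append((row["timestamp"] - unlock_ts) / 60000)  # ms -> minutes
            unlock_ts = None
    return pd.Series(durations, dtype="float")

def screen_features(durations: pd.Series) -> dict:
    return {"countepisode": len(durations), "sumduration": durations.sum(),
            "maxduration": durations.max(), "minduration": durations.min(),
            "avgduration": durations.mean(), "stdduration": durations.std()}
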
However, as mentioned above this is still computing unlock to lock episodes.","title":"Phone Screen"},{"location":"features/phone-screen/#phone-screen","text":"Sensor parameters description for [PHONE_SCREEN] : Key Description [TABLE] Database table where the screen data is stored","title":"Phone Screen"},{"location":"features/phone-screen/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_screen_raw.csv - data/raw/ { pid } /phone_screen_with_datetime.csv - data/raw/ { pid } /phone_screen_with_datetime_unified.csv - data/interim/ { pid } /phone_screen_episodes.csv - data/interim/ { pid } /phone_screen_episodes_resampled.csv - data/interim/ { pid } /phone_screen_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_screen_features/phone_screen_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_screen.csv Parameters description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_SCREEN features from the RAPIDS provider [FEATURES] Features to be computed, see table below [REFERENCE_HOUR_FIRST_USE] The reference point from which firstuseafter is to be computed, default is midnight [IGNORE_EPISODES_SHORTER_THAN] Ignore episodes that are shorter than this threshold (minutes). Set to 0 to disable this filter. [IGNORE_EPISODES_LONGER_THAN] Ignore episodes that are longer than this threshold (minutes). Set to 0 to disable this filter. [EPISODE_TYPES] Currently we only support unlock episodes (from when the phone is unlocked until the screen is off) Features description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Feature Units Description sumduration minutes Total duration of all unlock episodes. maxduration minutes Longest duration of any unlock episode. minduration minutes Shortest duration of any unlock episode. avgduration minutes Average duration of all unlock episodes. stdduration minutes Standard deviation duration of all unlock episodes. countepisode episodes Number of all unlock episodes |firstuseafter |minutes |Minutes until the first unlock episode. Assumptions/Observations In Android, lock events can happen right after an off event, after a few seconds of an off event, or never happen depending on the phone's settings, therefore, an unlock episode is defined as the time between an unlock and a off event. In iOS, on and off events do not exist, so an unlock episode is defined as the time between an unlock and a lock event. Events in iOS are recorded reliably albeit some duplicated lock events within milliseconds from each other, so we only keep consecutive unlock/lock pairs. In Android you cand find multiple consecutive unlock or lock events, so we only keep consecutive unlock/off pairs. In our experiments these cases are less than 10% of the screen events collected and this happens because ACTION_SCREEN_OFF and ACTION_SCREEN_ON are sent when the device becomes non-interactive which may have nothing to do with the screen turning off . In addition to unlock/off episodes, in Android it is possible to measure the time spent on the lock screen before an unlock event as well as the total screen time (i.e. ON to OFF ) but these are not implemented at the moment. We transform iOS screen events to match Android\u2019s format, we replace lock episodes with off episodes (2 with 0) in iOS. 
However, as mentioned above this is still computing unlock to lock episodes.","title":"RAPIDS provider"},{"location":"features/phone-wifi-connected/","text":"Phone WiFi Connected \u00b6 Sensor parameters description for [PHONE_WIFI_CONNECTED] : Key Description [TABLE] Database table where the wifi (connected) data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_wifi_connected_raw.csv - data/raw/ { pid } /phone_wifi_connected_with_datetime.csv - data/interim/ { pid } /phone_wifi_connected_features/phone_wifi_connected_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_connected.csv Parameters description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_CONNECTED features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points connected during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access point during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A connected WiFI access point is one that a phone was connected to. By default AWARE stores this data in the sensor_wifi table.","title":"Phone WiFI Connected"},{"location":"features/phone-wifi-connected/#phone-wifi-connected","text":"Sensor parameters description for [PHONE_WIFI_CONNECTED] : Key Description [TABLE] Database table where the wifi (connected) data is stored","title":"Phone WiFi Connected"},{"location":"features/phone-wifi-connected/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_wifi_connected_raw.csv - data/raw/ { pid } /phone_wifi_connected_with_datetime.csv - data/interim/ { pid } /phone_wifi_connected_features/phone_wifi_connected_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_connected.csv Parameters description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_CONNECTED features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points connected during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access point during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A connected WiFI access point is one that a phone was connected to. 
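As an illustration (not RAPIDS source code), these three features reduce to simple aggregations over the scan rows of one time segment; the bssid column name and the helper's signature are assumptions made for this sketch:

import pandas as pd

def wifi_features(segment_scans: pd.DataFrame, most_scanned_bssid: str) -> dict:
    # segment_scans: one row per access point scan inside a time_segment instance
    return {
        "countscans": len(segment_scans),
        "uniquedevices": segment_scans["bssid"].nunique(),  # unique hardware addresses
        # the most scanned access point is determined across the whole monitoring period
        "countscansmostuniquedevice": int((segment_scans["bssid"] == most_scanned_bssid).sum()),
    }
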
By default AWARE stores this data in the sensor_wifi table.","title":"RAPIDS provider"},{"location":"features/phone-wifi-visible/","text":"Phone WiFi Visible \u00b6 Sensor parameters description for [PHONE_WIFI_VISIBLE] : Key Description [TABLE] Database table where the wifi (visible) data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_wifi_visible_raw.csv - data/raw/ { pid } /phone_wifi_visible_with_datetime.csv - data/interim/ { pid } /phone_wifi_visible_features/phone_wifi_visible_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_visible.csv Parameters description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_VISIBLE features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points visible during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access point during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A visible WiFI access point is one that a phone sensed around itself but that it was not connected to. Due to API restrictions, this sensor is not available on iOS. By default AWARE stores this data in the wifi table.","title":"Phone WiFI Visible"},{"location":"features/phone-wifi-visible/#phone-wifi-visible","text":"Sensor parameters description for [PHONE_WIFI_VISIBLE] : Key Description [TABLE] Database table where the wifi (visible) data is stored","title":"Phone WiFi Visible"},{"location":"features/phone-wifi-visible/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_wifi_visible_raw.csv - data/raw/ { pid } /phone_wifi_visible_with_datetime.csv - data/interim/ { pid } /phone_wifi_visible_features/phone_wifi_visible_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_visible.csv Parameters description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_VISIBLE features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points visible during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access point during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A visible WiFI access point is one that a phone sensed around itself but that it was not connected to. Due to API restrictions, this sensor is not available on iOS. 
By default AWARE stores this data in the wifi table.","title":"RAPIDS provider"},{"location":"setup/configuration/","text":"Configuration \u00b6 You need to follow these steps to configure your RAPIDS deployment before you can extract behavioral features: Add your database credentials Choose the timezone of your study Create your participant files Select what time segments you want to extract features on Modify your device data source configuration Select what sensors and features you want to process When you are done with this configuration, go to executing RAPIDS . Hint Every time you see config[\"KEY\"] or [KEY] in these docs we are referring to the corresponding key in the config.yaml file. Database credentials \u00b6 Create an empty file called .env in your RAPIDS root directory Add the following lines and replace your database-specific credentials (user, password, host, and database): [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE Warning The label MY_GROUP is arbitrary but it has to match the following config.yaml key: DATABASE_GROUP : &database_group MY_GROUP Note You can ignore this step if you are only processing Fitbit data in CSV files. Timezone of your study \u00b6 Single timezone \u00b6 If your study only happened in a single time zone, select the appropriate code from this list and change the following config key. Double check the timezone code you pick; for example, US Eastern Time is America/New_York , not EST TIMEZONE : &timezone America/New_York Multiple timezones \u00b6 Support coming soon. Participant files \u00b6 Participant files link together multiple devices (smartphones and wearables) to specific participants and identify them throughout RAPIDS. You can create these files manually or automatically . Participant files are stored in data/external/participant_files/pxx.yaml and follow a unified structure . Note The list PIDS in config.yaml needs to have the participant file names of the people you want to process. For example, if you created p01.yaml , p02.yaml and p03.yaml files in /data/external/participant_files/ , then PIDS should be: PIDS : [ p01 , p02 , p03 ] Tip Attribute values of the [PHONE] and [FITBIT] sections in every participant file are optional, which allows you to analyze data from participants that only carried smartphones, only Fitbit devices, or both. Optional: Migrating participants files with the old format If you were using the pre-release version of RAPIDS with participant files in plain text (as opposed to yaml), you can run the following command and your old files will be converted into yaml files stored in data/external/participant_files/ python tools/update_format_participant_files.py Structure of participants files \u00b6 Example of the structure of a participant file In this example, the participant used an android phone, an ios phone, and a fitbit device throughout the study between Apr 23 rd 2020 and Oct 28 th 2020 PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 , dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ] PLATFORMS : [ android , ios ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 FITBIT : DEVICE_IDS : [ fitbit1 ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 For [PHONE] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each smartphone; you can have more than one for participants who changed phones in the middle of the study, in which case data from all their devices will be joined and relabeled with the last one on this list. 
[PLATFORMS] An array that specifies the OS of each smartphone in [DEVICE_IDS] ; use a combination of android or ios (we support participants that changed platforms in the middle of your study!). If you have an aware_device table in your database you can set [PLATFORMS]: [multiple] and RAPIDS will infer them automatically. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD . Only data collected after this date will be included in the analysis. [END_DATE] A string with format YYYY-MM-DD . Only data collected before this date will be included in the analysis. For [FITBIT] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each Fitbit; you can have more than one in case the participant changed devices in the middle of the study, in which case data from all devices will be joined and relabeled with the last device_id on this list. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD . Only data collected after this date will be included in the analysis. [END_DATE] A string with format YYYY-MM-DD . Only data collected before this date will be included in the analysis. Automatic creation of participant files \u00b6 You have two options: a) use the aware_device table in your database, or b) use a CSV file. In either case, in your config.yaml , set [PHONE_SECTION][ADD] or [FITBIT_SECTION][ADD] to TRUE depending on what devices you used in your study. Set [DEVICE_ID_COLUMN] to the name of the column that uniquely identifies each device, and include any device ids you want to ignore in [IGNORED_DEVICE_IDS] . aware_device table Set the following keys in your config.yaml CREATE_PARTICIPANT_FILES : SOURCE : TYPE : AWARE_DEVICE_TABLE DATABASE_GROUP : *database_group CSV_FILE_PATH : \"\" TIMEZONE : *timezone PHONE_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : device_id # column name IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : fitbit_id # column name IGNORED_DEVICE_IDS : [] Then run snakemake -j1 create_participants_files CSV file Set the following keys in your config.yaml . CREATE_PARTICIPANT_FILES : SOURCE : TYPE : CSV_FILE DATABASE_GROUP : \"\" CSV_FILE_PATH : \"your_path/to_your.csv\" TIMEZONE : *timezone PHONE_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : device_id # column name IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : fitbit_id # column name IGNORED_DEVICE_IDS : [] Your CSV file ( [SOURCE][CSV_FILE_PATH] ) should have the following columns, but you can omit any values you don\u2019t have in each column: Column Description phone device id The name of this column has to match [PHONE_SECTION][DEVICE_ID_COLUMN] . Separate multiple ids with ; fitbit device id The name of this column has to match [FITBIT_SECTION][DEVICE_ID_COLUMN] . Separate multiple ids with ; pid Unique identifiers with the format pXXX (your participant files will be named with this string). platform Use android , ios or multiple as explained above; separate values with ; label A human-readable string that is used in reports and visualizations. start_date A string with format YYYY-MM-DD . end_date A string with format YYYY-MM-DD . 
Example device_id,pid,label,platform,start_date,end_date,fitbit_id a748ee1a-1d0b-4ae9-9074-279a2b6ba524;dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43,p01,julio,android;ios,2020-01-01,2021-01-01,fitbit1 4c4cf7a1-0340-44bc-be0f-d5053bf7390c,p02,meng,ios,2021-01-01,2022-01-01,fitbit2 Then run snakemake -j1 create_participants_files Time Segments \u00b6 Time segments (or epochs) are the time windows on which you want to extract behavioral features. For example, you might want to process data on every day, every morning, or only during weekends. RAPIDS offers three categories of time segments that are flexible enough to cover most use cases: frequency (short time windows every day), periodic (arbitrary time windows on any day), and event (arbitrary time windows around events of interest). See also our examples . Frequency Segments These segments are computed on every day and all have the same duration (for example 30 minutes). Set the following keys in your config.yaml TIME_SEGMENTS : &time_segments TYPE : FREQUENCY FILE : \"data/external/your_frequency_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE The file pointed to by [TIME_SEGMENTS][FILE] should have the following format and can only have 1 row. Column Description label A string that is used as a prefix in the name of your time segments length An integer representing the duration of your time segments in minutes Example label,length thirtyminutes,30 This configuration will compute 48 time segments for every day when any data from any participant was sensed. For example: start_time,length,label 00:00,30,thirtyminutes0000 00:30,30,thirtyminutes0001 01:00,30,thirtyminutes0002 01:30,30,thirtyminutes0003 ... Periodic Segments These segments can be computed every day, or on specific days of the week, month, quarter, and year. Their minimum duration is 1 minute but they can be as long as you want. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : PERIODIC FILE : \"data/external/your_periodic_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE If [INCLUDE_PAST_PERIODIC_SEGMENTS] is set to TRUE , RAPIDS will consider segment instances far enough back in the past to include the first row of data of each participant. For example, if the first row of data from a participant happened on Saturday March 7 th 2020 and the requested segment duration is 7 days starting on every Sunday, the first segment to be considered would start on Sunday March 1 st if [INCLUDE_PAST_PERIODIC_SEGMENTS] is TRUE or on Sunday March 8 th if FALSE . The file pointed to by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. It has to be unique across rows start_time A string with format HH:MM:SS representing the starting time of this segment on any day length A string representing the length of this segment. It can have one or more of the following strings XXD XXH XXM XXS to represent days, hours, minutes and seconds. For example 7D 23H 59M 59S repeats_on One of the following options every_day , wday , qday , mday , and yday . The last four represent a day of the week, quarter, month, and year repeats_value An integer complementing repeats_on . 
If you set repeats_on to every_day , set this to 0 ; otherwise 1-7 represent a wday starting from Mondays, 1-31 represent a mday , 1-91 represent a qday , and 1-366 represent a yday . Example label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 morning,06:00:00,5H 59M 59S,every_day,0 afternoon,12:00:00,5H 59M 59S,every_day,0 evening,18:00:00,5H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 This configuration will create five segment instances ( daily , morning , afternoon , evening , night ) on any given day ( every_day set to 0). The daily segment will start at midnight and will last 23:59:59 ; the other four segments will start at 6am, 12pm, 6pm, and 12am, respectively, and last for 05:59:59 . Event segments These segments can be computed before or after an event of interest (defined as any UNIX timestamp). Their minimum duration is 1 minute but they can be as long as you want. The start of each segment can be shifted backwards or forwards from the specified timestamp. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : EVENT FILE : \"data/external/your_event_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE The file pointed to by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. If labels are unique, every segment is independent; if two or more segments have the same label, their data will be grouped when computing auxiliary data for features like the most frequent contact for calls (the most frequent contact will be computed across all these segments). There cannot be two overlapping event segments with the same label (RAPIDS will throw an error). event_timestamp A UNIX timestamp that represents the moment an event of interest happened (clinical relapse, survey, readmission, etc.). The corresponding time segment will be computed around this moment using length , shift , and shift_direction length A string representing the length of this segment. It can have one or more of the following keys XXD XXH XXM XXS to represent a number of days, hours, minutes, and seconds. For example 7D 23H 59M 59S shift A string representing the time shift from event_timestamp . It can have one or more of the following keys XXD XXH XXM XXS to represent a number of days, hours, minutes and seconds. For example 7D 23H 59M 59S . Use this value to change the start of a segment with respect to its event_timestamp . For example, set this variable to 1H to create a segment that starts 1 hour from an event of interest ( shift_direction determines if it\u2019s before or after). shift_direction An integer representing whether the shift is before ( -1 ) or after ( 1 ) an event_timestamp . device_id The device id (smartphone or fitbit) to which this segment belongs. You have to create a line in this event segment file for each event of a participant that you want to analyse. 
If you have participants with multiple device ids you can choose any of them Example label,event_timestamp,length,shift,shift_direction,device_id stress1,1587661220000,1H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress2,1587747620000,4H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress3,1587906020000,3H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress4,1584291600000,7H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress5,1588172420000,9H,5M,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587661220000,1H,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587747620000,1D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587906020000,7D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 This example will create eight segments for a single participant ( a748ee1a... ), five independent stressX segments with various lengths (1,4,3,7, and 9 hours). Segments stress1 , stress3 , and stress5 are shifted forwards by 5 minutes and stress2 and stress4 are shifted backwards by 4 hours (that is, if the stress4 event happened on March 15 th at 1pm EST ( 1584291600000 ), the time segment will start on that day at 9am and end at 4pm). The three mood segments are 1 hour, 1 day and 7 days long and have no shift. In addition, these mood segments are grouped together, meaning that although RAPIDS will compute features on each one of them, some necessary information to compute a few of such features will be extracted from all three segments, for example the phone contact that called a participant the most or the location clusters visited by a participant. Segment Examples \u00b6 5-minutes Use the following Frequency segment file to create 288 (12 * 60 * 24) 5-minute segments starting from midnight of every day in your study label,length fiveminutes,5 Daily Use the following Periodic segment file to create daily segments starting from midnight of every day in your study label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 Morning Use the following Periodic segment file to create morning segments starting at 06:00:00 and ending at 11:59:59 of every day in your study label,start_time,length,repeats_on,repeats_value morning,06:00:00,5H 59M 59S,every_day,0 Overnight Use the following Periodic segment file to create overnight segments starting at 20:00:00 and ending at 07:59:59 (next day) of every day in your study label,start_time,length,repeats_on,repeats_value morning,20:00:00,11H 59M 59S,every_day,0 Weekly Use the following Periodic segment file to create non-overlapping weekly segments starting at midnight of every Monday in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,wday,1 Use the following Periodic segment file to create overlapping weekly segments starting at midnight of every day in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,every_day,0 Week-ends Use the following Periodic segment file to create week-end segments starting at midnight of every Saturday in your study label,start_time,length,repeats_on,repeats_value weekend,00:00:00,1D 23H 59M 59S,wday,6 Around surveys Use the following Event segment file to create two 2-hour segments that start 1 hour before surveys answered by 3 participants label,event_timestamp,length,shift,shift_direction,device_id survey1,1587661220000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey2,1587747620000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey1,1587906020000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr 
survey2,1584291600000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey1,1588172420000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 survey2,1584291600000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 Device Data Source Configuration \u00b6 You might need to modify the following config keys in your config.yaml depending on what devices your participants used and where you are storing your data. You can ignore [PHONE_DATA_CONFIGURATION] or [FITBIT_DATA_CONFIGURATION] if you are not working with either device. Phone The relevant config.yaml section looks like this by default: PHONE_DATA_CONFIGURATION : SOURCE : TYPE : DATABASE DATABASE_GROUP : *database_group DEVICE_ID_COLUMN : device_id # column name TIMEZONE : TYPE : SINGLE # SINGLE (MULTIPLE support coming soon) VALUE : *timezone Parameters for [PHONE_DATA_CONFIGURATION] Key Description [SOURCE] [TYPE] Only DATABASE is supported (phone data will be pulled from a database) [SOURCE] [DATABASE_GROUP] *database_group points to the value defined before in Database credentials [SOURCE] [DEVICE_ID_COLUMN] A column that contains strings that uniquely identify smartphones. For data collected with AWARE this is usually device_id [TIMEZONE] [TYPE] Only SINGLE is supported for now [TIMEZONE] [VALUE] *timezone points to the value defined before in Timezone of your study Fitbit The relevant config.yaml section looks like this by default: FITBIT_DATA_CONFIGURATION : SOURCE : TYPE : DATABASE # DATABASE or FILES (set each [FITBIT_SENSOR][TABLE] attribute with a table name or a file path accordingly) COLUMN_FORMAT : JSON # JSON or PLAIN_TEXT DATABASE_GROUP : *database_group DEVICE_ID_COLUMN : device_id # column name TIMEZONE : TYPE : SINGLE # Fitbit devices don't support time zones so we read this data in the timezone indicated by VALUE VALUE : *timezone Parameters for [FITBIT_DATA_CONFIGURATION] Key Description [SOURCE] [TYPE] DATABASE or FILES (set each [FITBIT_SENSOR] [TABLE] attribute accordingly with a table name or a file path) [SOURCE] [COLUMN_FORMAT] JSON or PLAIN_TEXT . Column format of the source data. If you pulled your data directly from the Fitbit API the column containing the sensor data will be in JSON format [SOURCE] [DATABASE_GROUP] *database_group points to the value defined before in Database credentials . Only used if [TYPE] is DATABASE . [SOURCE] [DEVICE_ID_COLUMN] A column that contains strings that uniquely identify Fitbit devices. [TIMEZONE] [TYPE] Only SINGLE is supported (Fitbit devices always store data in local time). [TIMEZONE] [VALUE] *timezone points to the value defined before in Timezone of your study Sensor and Features to Process \u00b6 Finally, you need to modify the config.yaml section of the sensors you want to extract behavioral features from. All sensors follow the same naming convention ( DEVICE_SENSOR ) and parameter structure, which we explain in the Behavioral Features Introduction . Done Head over to Execution to learn how to execute RAPIDS.","title":"Configuration"},{"location":"setup/configuration/#configuration","text":"You need to follow these steps to configure your RAPIDS deployment before you can extract behavioral features: Add your database credentials Choose the timezone of your study Create your participant files Select what time segments you want to extract features on Modify your device data source configuration Select what sensors and features you want to process When you are done with this configuration, go to executing RAPIDS . 
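Tip A quick way to confirm that these pieces line up before running the pipeline is a small Python check like the sketch below (a hypothetical helper, not shipped with RAPIDS); it assumes config.yaml and .env sit in the RAPIDS root directory:

import configparser
import pathlib
import yaml  # PyYAML

with open("config.yaml") as f:
    config = yaml.safe_load(f)  # YAML anchors such as &database_group are resolved automatically

# every pid listed in PIDS needs a matching participant file
missing = [pid for pid in config.get("PIDS", [])
           if not pathlib.Path(f"data/external/participant_files/{pid}.yaml").exists()]
if missing:
    print("Missing participant files:", missing)

# DATABASE_GROUP must match a section label in .env
env = configparser.ConfigParser()
env.read(".env")
group = config.get("DATABASE_GROUP")
if group and group not in env.sections():
    print(f".env has no [{group}] section")
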
Hint Every time you see config[\"KEY\"] or [KEY] in these docs we are referring to the corresponding key in the config.yaml file.","title":"Configuration"},{"location":"setup/configuration/#database-credentials","text":"Create an empty file called .env in your RAPIDS root directory Add the following lines and replace your database-specific credentials (user, password, host, and database): [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE Warning The label MY_GROUP is arbitrary but it has to match the following config.yaml key: DATABASE_GROUP : &database_group MY_GROUP Note You can ignore this step if you are only processing Fitbit data in CSV files.","title":"Database credentials"},{"location":"setup/configuration/#timezone-of-your-study","text":"","title":"Timezone of your study"},{"location":"setup/configuration/#single-timezone","text":"If your study only happened in a single time zone, select the appropriate code form this list and change the following config key. Double check your timezone code pick, for example US Eastern Time is America/New_York not EST TIMEZONE : &timezone America/New_York","title":"Single timezone"},{"location":"setup/configuration/#multiple-timezones","text":"Support coming soon.","title":"Multiple timezones"},{"location":"setup/configuration/#participant-files","text":"Participant files link together multiple devices (smartphones and wearables) to specific participants and identify them throughout RAPIDS. You can create these files manually or automatically . Participant files are stored in data/external/participant_files/pxx.yaml and follow a unified structure . Note The list PIDS in config.yaml needs to have the participant file names of the people you want to process. For example, if you created p01.yaml , p02.yaml and p03.yaml files in /data/external/participant_files/ , then PIDS should be: PIDS : [ p01 , p02 , p03 ] Tip Attribute values of the [PHONE] and [FITBIT] sections in every participant file are optional which allows you to analyze data from participants that only carried smartphones, only Fitbit devices, or both. Optional: Migrating participants files with the old format If you were using the pre-release version of RAPIDS with participant files in plain text (as opposed to yaml), you can run the following command and your old files will be converted into yaml files stored in data/external/participant_files/ python tools/update_format_participant_files.py","title":"Participant files"},{"location":"setup/configuration/#structure-of-participants-files","text":"Example of the structure of a participant file In this example, the participant used an android phone, an ios phone, and a fitbit device throughout the study between Apr 23 rd 2020 and Oct 28 th 2020 PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 , dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ] PLATFORMS : [ android , ios ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 FITBIT : DEVICE_IDS : [ fitbit1 ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 For [PHONE] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each smartphone, you can have more than one for when participants changed phones in the middle of the study, in this case, data from all their devices will be joined and relabeled with the last 1 on this list. 
[PLATFORMS] An array that specifies the OS of each smartphone in [DEVICE_IDS] , use a combination of android or ios (we support participants that changed platforms in the middle of your study!). If you have an aware_device table in your database you can set [PLATFORMS]: [multiple] and RAPIDS will infer them automatically. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYY-MM-DD . Only data collected after this date will be included in the analysis [END_DATE] A string with format YYY-MM-DD . Only data collected before this date will be included in the analysis For [FITBIT] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each Fitbit, you can have more than one in case the participant changed devices in the middle of the study, in this case, data from all devices will be joined and relabeled with the last device_id on this list. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYY-MM-DD . Only data collected after this date will be included in the analysis [END_DATE] A string with format YYY-MM-DD . Only data collected before this date will be included in the analysis","title":"Structure of participants files"},{"location":"setup/configuration/#automatic-creation-of-participant-files","text":"You have two options a) use the aware_device table in your database or b) use a CSV file. In either case, in your config.yaml , set [PHONE_SECTION][ADD] or [FITBIT_SECTION][ADD] to TRUE depending on what devices you used in your study. Set [DEVICE_ID_COLUMN] to the name of the column that uniquely identifies each device and include any device ids you want to ignore in [IGNORED_DEVICE_IDS] . aware_device table Set the following keys in your config.yaml CREATE_PARTICIPANT_FILES : SOURCE : TYPE : AWARE_DEVICE_TABLE DATABASE_GROUP : *database_group CSV_FILE_PATH : \"\" TIMEZONE : *timezone PHONE_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : device_id # column name IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : fitbit_id # column name IGNORED_DEVICE_IDS : [] Then run snakemake -j1 create_participants_files CSV file Set the following keys in your config.yaml . CREATE_PARTICIPANT_FILES : SOURCE : TYPE : CSV_FILE DATABASE_GROUP : \"\" CSV_FILE_PATH : \"your_path/to_your.csv\" TIMEZONE : *timezone PHONE_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : device_id # column name IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : fitbit_id # column name IGNORED_DEVICE_IDS : [] Your CSV file ( [SOURCE][CSV_FILE_PATH] ) should have the following columns but you can omit any values you don\u2019t have on each column: Column Description phone device id The name of this column has to match [PHONE_SECTION][DEVICE_ID_COLUMN] . Separate multiple ids with ; fitbit device id The name of this column has to match [FITBIT_SECTION][DEVICE_ID_COLUMN] . Separate multiple ids with ; pid Unique identifiers with the format pXXX (your participant files will be named with this string platform Use android , ios or multiple as explained above, separate values with ; label A human readable string that is used in reports and visualizations. start_date A string with format YYY-MM-DD . end_date A string with format YYY-MM-DD . 
Example device_id,pid,label,platform,start_date,end_date,fitbit_id a748ee1a-1d0b-4ae9-9074-279a2b6ba524;dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43,p01,julio,android;ios,2020-01-01,2021-01-01,fitbit1 4c4cf7a1-0340-44bc-be0f-d5053bf7390c,p02,meng,ios,2021-01-01,2022-01-01,fitbit2 Then run snakemake -j1 create_participants_files","title":"Automatic creation of participant files"},{"location":"setup/configuration/#time-segments","text":"Time segments (or epochs) are the time windows on which you want to extract behavioral features. For example, you might want to process data on every day, every morning, or only during weekends. RAPIDS offers three categories of time segments that are flexible enough to cover most use cases: frequency (short time windows every day), periodic (arbitrary time windows on any day), and event (arbitrary time windows around events of interest). See also our examples . Frequency Segments These segments are computed on every day and all have the same duration (for example 30 minutes). Set the following keys in your config.yaml TIME_SEGMENTS : &time_segments TYPE : FREQUENCY FILE : \"data/external/your_frequency_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can only have 1 row. Column Description label A string that is used as a prefix in the name of your time segments length An integer representing the duration of your time segments in minutes Example label,length thirtyminutes,30 This configuration will compute 48 time segments for every day when any data from any participant was sensed. For example: start_time,length,label 00:00,30,thirtyminutes0000 00:30,30,thirtyminutes0001 01:00,30,thirtyminutes0002 01:30,30,thirtyminutes0003 ... Periodic Segments These segments can be computed every day, or on specific days of the week, month, quarter, and year. Their minimum duration is 1 minute but they can be as long as you want. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : PERIODIC FILE : \"data/external/your_periodic_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE If [INCLUDE_PAST_PERIODIC_SEGMENTS] is set to TRUE , RAPIDS will consider instances of your segments back enough in the past as to include the first row of data of each participant. For example, if the first row of data from a participant happened on Saturday March 7 th 2020 and the requested segment duration is 7 days starting on every Sunday, the first segment to be considered would start on Sunday March 1 st if [INCLUDE_PAST_PERIODIC_SEGMENTS] is TRUE or on Sunday March 8 th if FALSE . The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. It has to be unique between rows start_time A string with format HH:MM:SS representing the starting time of this segment on any day length A string representing the length of this segment.It can have one or more of the following strings XXD XXH XXM XXS to represent days, hours, minutes and seconds. For example 7D 23H 59M 59S repeats_on One of the follow options every_day , wday , qday , mday , and yday . The last four represent a week, quarter, month and year day repeats_value An integer complementing repeats_on . 
If you set repeats_on to every_day set this to 0 , otherwise 1-7 represent a wday starting from Mondays, 1-31 represent a mday , 1-91 represent a qday , and 1-366 represent a yday Example label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 morning,06:00:00,5H 59M 59S,every_day,0 afternoon,12:00:00,5H 59M 59S,every_day,0 evening,18:00:00,5H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 This configuration will create five segments instances ( daily , morning , afternoon , evening , night ) on any given day ( every_day set to 0). The daily segment will start at midnight and will last 23:59:59 , the other four segments will start at 6am, 12pm, 6pm, and 12am respectively and last for 05:59:59 . Event segments These segments can be computed before or after an event of interest (defined as any UNIX timestamp). Their minimum duration is 1 minute but they can be as long as you want. The start of each segment can be shifted backwards or forwards from the specified timestamp. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : EVENT FILE : \"data/external/your_event_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. If labels are unique, every segment is independent; if two or more segments have the same label, their data will be grouped when computing auxiliary data for features like the most frequent contact for calls (the most frequent contact will be computed across all these segments). There cannot be two overlaping event segments with the same label (RAPIDS will throw an error) event_timestamp A UNIX timestamp that represents the moment an event of interest happened (clinical relapse, survey, readmission, etc.). The corresponding time segment will be computed around this moment using length , shift , and shift_direction length A string representing the length of this segment. It can have one or more of the following keys XXD XXH XXM XXS to represent a number of days, hours, minutes, and seconds. For example 7D 23H 59M 59S shift A string representing the time shift from event_timestamp . It can have one or more of the following keys XXD XXH XXM XXS to represent a number of days, hours, minutes and seconds. For example 7D 23H 59M 59S . Use this value to change the start of a segment with respect to its event_timestamp . For example, set this variable to 1H to create a segment that starts 1 hour from an event of interest ( shift_direction determines if it\u2019s before or after). shift_direction An integer representing whether the shift is before ( -1 ) or after ( 1 ) an event_timestamp device_id The device id (smartphone or fitbit) to whom this segment belongs to. You have to create a line in this event segment file for each event of a participant that you want to analyse. 
If you have participants with multiple device ids you can choose any of them Example label,event_timestamp,length,shift,shift_direction,device_id stress1,1587661220000,1H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress2,1587747620000,4H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress3,1587906020000,3H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress4,1584291600000,7H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress5,1588172420000,9H,5M,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587661220000,1H,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587747620000,1D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587906020000,7D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 This example will create eight segments for a single participant ( a748ee1a... ), five independent stressX segments with various lengths (1,4,3,7, and 9 hours). Segments stress1 , stress3 , and stress5 are shifted forwards by 5 minutes and stress2 and stress4 are shifted backwards by 4 hours (that is, if the stress4 event happened on March 15 th at 1pm EST ( 1584291600000 ), the time segment will start on that day at 9am and end at 4pm). The three mood segments are 1 hour, 1 day and 7 days long and have no shift. In addition, these mood segments are grouped together, meaning that although RAPIDS will compute features on each one of them, some necessary information to compute a few of such features will be extracted from all three segments, for example the phone contact that called a participant the most or the location clusters visited by a participant.","title":"Time Segments"},{"location":"setup/configuration/#segment-examples","text":"5-minutes Use the following Frequency segment file to create 288 (12 * 60 * 24) 5-minute segments starting from midnight of every day in your study label,length fiveminutes,5 Daily Use the following Periodic segment file to create daily segments starting from midnight of every day in your study label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 Morning Use the following Periodic segment file to create morning segments starting at 06:00:00 and ending at 11:59:59 of every day in your study label,start_time,length,repeats_on,repeats_value morning,06:00:00,5H 59M 59S,every_day,0 Overnight Use the following Periodic segment file to create overnight segments starting at 20:00:00 and ending at 07:59:59 (next day) of every day in your study label,start_time,length,repeats_on,repeats_value morning,20:00:00,11H 59M 59S,every_day,0 Weekly Use the following Periodic segment file to create non-overlapping weekly segments starting at midnight of every Monday in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,wday,1 Use the following Periodic segment file to create overlapping weekly segments starting at midnight of every day in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,every_day,0 Week-ends Use the following Periodic segment file to create week-end segments starting at midnight of every Saturday in your study label,start_time,length,repeats_on,repeats_value weekend,00:00:00,1D 23H 59M 59S,wday,6 Around surveys Use the following Event segment file to create two 2-hour segments that start 1 hour before surveys answered by 3 participants label,event_timestamp,length,shift,shift_direction,device_id survey1,1587661220000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey2,1587747620000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 
survey1,1587906020000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey2,1584291600000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey1,1588172420000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 survey2,1584291600000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3","title":"Segment Examples"},{"location":"setup/configuration/#device-data-source-configuration","text":"You might need to modify the following config keys in your config.yaml depending on what devices your participants used and where you are storing your data. You can ignore [PHONE_DATA_CONFIGURATION] or [FITBIT_DATA_CONFIGURATION] if you are not working with either devices. Phone The relevant config.yaml section looks like this by default: PHONE_DATA_CONFIGURATION : SOURCE : TYPE : DATABASE DATABASE_GROUP : *database_group DEVICE_ID_COLUMN : device_id # column name TIMEZONE : TYPE : SINGLE # SINGLE (MULTIPLE support coming soon) VALUE : *timezone Parameters for [PHONE_DATA_CONFIGURATION] Key Description [SOURCE] [TYPE] Only DATABASE is supported (phone data will be pulled from a database) [SOURCE] [DATABASE_GROUP] *database_group points to the value defined before in Database credentials [SOURCE] [DEVICE_ID_COLUMN] A column that contains strings that uniquely identify smartphones. For data collected with AWARE this is usually device_id [TIMEZONE] [TYPE] Only SINGLE is supported for now [TIMEZONE] [VALUE] *timezone points to the value defined before in Timezone of your study Fitbit The relevant config.yaml section looks like this by default: FITBIT_DATA_CONFIGURATION : SOURCE : TYPE : DATABASE # DATABASE or FILES (set each [FITBIT_SENSOR][TABLE] attribute with a table name or a file path accordingly) COLUMN_FORMAT : JSON # JSON or PLAIN_TEXT DATABASE_GROUP : *database_group DEVICE_ID_COLUMN : device_id # column name TIMEZONE : TYPE : SINGLE # Fitbit devices don't support time zones so we read this data in the timezone indicated by VALUE VALUE : *timezone Parameters for For [FITBIT_DATA_CONFIGURATION] Key Description [SOURCE] [TYPE] DATABASE or FILES (set each [FITBIT_SENSOR] [TABLE] attribute accordingly with a table name or a file path) [SOURCE] [COLUMN_FORMAT] JSON or PLAIN_TEXT . Column format of the source data. If you pulled your data directly from the Fitbit API the column containing the sensor data will be in JSON format [SOURCE] [DATABASE_GROUP] *database_group points to the value defined before in Database credentials . Only used if [TYPE] is DATABASE . [SOURCE] [DEVICE_ID_COLUMN] A column that contains strings that uniquely identify Fitbit devices. [TIMEZONE] [TYPE] Only SINGLE is supported (Fitbit devices always store data in local time). [TIMEZONE] [VALUE] *timezone points to the value defined before in Timezone of your study","title":"Device Data Source Configuration"},{"location":"setup/configuration/#sensor-and-features-to-process","text":"Finally, you need to modify the config.yaml section of the sensors you want to extract behavioral features from. All sensors follow the same naming nomenclature ( DEVICE_SENSOR ) and parameter structure which we explain in the Behavioral Features Introduction . Done Head over to Execution to learn how to execute RAPIDS.","title":"Sensor and Features to Process"},{"location":"setup/execution/","text":"Execution \u00b6 After you have installed and configured RAPIDS, use the following command to execute it. 
./rapids -j1 Ready to extract behavioral features If you are ready to extract features, head over to the Behavioral Features Introduction Info The script ./rapids is a wrapper around Snakemake so you can pass any parameters that Snakemake accepts (e.g. -j1 ). Updating RAPIDS output after modifying config.yaml Any changes to the config.yaml file will be applied automatically and only the relevant files will be updated. This means that after modifying the features list for PHONE_MESSAGES for example, RAPIDS will update the output file with the correct features. Multi-core You can run RAPIDS over multiple cores by modifying the -j argument (e.g. use -j8 to use 8 cores). However , take into account that this means multiple sensor datasets for different participants will be loaded in memory at the same time. If RAPIDS crashes because it ran out of memory, reduce the number of cores and try again. As a reference, we have run RAPIDS over 12 cores and 32 GB of RAM without problems for a study with 200 participants with 14 days of low-frequency smartphone data (no accelerometer, gyroscope, or magnetometer). 
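If you are unsure how many cores your machine can handle, a rough, purely illustrative Python sketch like the following can suggest a -j value (the 3 GB-per-job figure is an arbitrary assumption, and psutil is not a RAPIDS dependency):

import os
import psutil  # third-party package, used here only for this illustration

ASSUMED_GB_PER_JOB = 3  # rough guess; tune it to your own data volume
available_gb = psutil.virtual_memory().available / 1e9
jobs = max(1, min(os.cpu_count() or 1, int(available_gb // ASSUMED_GB_PER_JOB)))
print(f"Suggested command: ./rapids -j{jobs}")
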
Forcing a complete rerun If you want to update your data from your database or rerun the whole pipeline from scratch, run one or both of the following commands depending on the devices you are using: ./rapids -j1 -R download_phone_data ./rapids -j1 -R download_fitbit_data Deleting RAPIDS output If you want to delete all the output files RAPIDS produces, you can execute the following command: ./rapids -j1 --delete-all-output","title":"Execution"},{"location":"setup/execution/#execution","text":"After you have installed and configured RAPIDS, use the following command to execute it. ./rapids -j1 Ready to extract behavioral features If you are ready to extract features, head over to the Behavioral Features Introduction Info The script ./rapids is a wrapper around Snakemake so you can pass any parameters that Snakemake accepts (e.g. -j1 ). Updating RAPIDS output after modifying config.yaml Any changes to the config.yaml file will be applied automatically and only the relevant files will be updated. This means that after modifying the features list for PHONE_MESSAGES for example, RAPIDS will update the output file with the correct features. Multi-core You can run RAPIDS over multiple cores by modifying the -j argument (e.g. use -j8 to use 8 cores). However , take into account that this means multiple sensor datasets for different participants will be loaded in memory at the same time. If RAPIDS crashes because it ran out of memory, reduce the number of cores and try again. As a reference, we have run RAPIDS over 12 cores and 32 GB of RAM without problems for a study with 200 participants with 14 days of low-frequency smartphone data (no accelerometer, gyroscope, or magnetometer). 
Forcing a complete rerun If you want to update your data from your database or rerun the whole pipeline from scratch, run one or both of the following commands depending on the devices you are using: ./rapids -j1 -R download_phone_data ./rapids -j1 -R download_fitbit_data Deleting RAPIDS output If you want to delete all the output files RAPIDS produces, you can execute the following command: ./rapids -j1 --delete-all-output","title":"Execution"},{"location":"setup/installation/","text":"Installation \u00b6 You can install RAPIDS using Docker (the fastest), or native instructions for MacOS and Ubuntu Docker Install Docker Pull our RAPIDS container docker pull agamk/rapids:latest Run RAPIDS' container (after this step is done you should see a prompt in the main RAPIDS folder with its python environment active) docker run -it agamk/rapids:latest Pull the latest version of RAPIDS git pull Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Optional . You can edit RAPIDS files with vim, but we recommend using Visual Studio Code and its Remote Containers extension How to configure Remote Containers extension Make sure the RAPIDS container is running Install the Remote - Containers extension Go to the Remote Explorer panel on the left-hand sidebar On the top right dropdown menu choose Containers Double click on the agamk/rapids container in the CONTAINERS tree A new VS Code session should open on the main RAPIDS folder inside the container. MacOS We tested these instructions on Catalina Install brew Install MySQL brew install mysql brew services start mysql Install R 4.0, pandoc and rmarkdown. If you have other instances of R, we recommend uninstalling them brew install r brew install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install miniconda (restart your terminal afterwards) brew cask install miniconda conda init zsh # (or conda init bash) Clone our repo git clone https://github.com/carissalow/rapids Create a python virtual environment cd rapids conda env create -f environment.yml -n rapids conda activate rapids Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3 GB of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Ubuntu We tested on Ubuntu 18.04 & 20.04 Install dependencies sudo apt install libcurl4-openssl-dev sudo apt install libssl-dev sudo apt install libxml2-dev Install MySQL sudo apt install libmysqlclient-dev sudo apt install mysql-server Add key for R\u2019s repository. 
If you have other instances of R, we recommend uninstalling them sudo apt update sudo apt install r-base Install Pandoc and rmarkdown sudo apt install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install git sudo apt install git Install miniconda Restart your current shell Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1","title":"Installation"},{"location":"setup/installation/#installation","text":"You can install RAPIDS using Docker (the fastest) or native instructions for MacOS and Ubuntu Docker Install Docker Pull our RAPIDS container docker pull agamk/rapids:latest Run RAPIDS' container (after this step is done you should see a prompt in the main RAPIDS folder with its python environment active) docker run -it agamk/rapids:latest Pull the latest version of RAPIDS git pull Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Optional. You can edit RAPIDS files with vim but we recommend using Visual Studio Code and its Remote Containers extension How to configure Remote Containers extension Make sure the RAPIDS container is running Install the Remote - Containers extension Go to the Remote Explorer panel on the left-hand sidebar On the top-right dropdown menu, choose Containers Double-click on the agamk/rapids container in the CONTAINERS tree A new VS Code session should open on RAPIDS' main folder inside the container. MacOS We tested these instructions on Catalina Install brew Install MySQL brew install mysql brew services start mysql Install R 4.0, pandoc and rmarkdown. If you have other instances of R, we recommend uninstalling them brew install r brew install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install miniconda (restart your terminal afterwards) brew cask install miniconda conda init zsh # (or conda init bash) Clone our repo git clone https://github.com/carissalow/rapids Create a python virtual environment cd rapids conda env create -f environment.yml -n rapids conda activate rapids Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Ubuntu We tested on Ubuntu 18.04 & 20.04 Install dependencies sudo apt install libcurl4-openssl-dev sudo apt install libssl-dev sudo apt install libxml2-dev Install MySQL sudo apt install libmysqlclient-dev sudo apt install mysql-server Add key for R\u2019s repository.
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 Add R\u2019s repository For 18.04 sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/' For 20.04 sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' Install R 4.0. If you have other instances of R, we recommend uninstalling them sudo apt update sudo apt install r-base Install Pandoc and rmarkdown sudo apt install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install git sudo apt install git Install miniconda Restart your current shell Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1","title":"Installation"},{"location":"visualizations/data-quality-visualizations/","text":"Data Quality Visualizations \u00b6 We showcase these visualizations with a test study that collected 14 days of smartphone and Fitbit data from two participants (t01 and t02) and extracted behavioral features within five time segments (daily, morning, afternoon, evening, and night). Note Time segments (e.g. daily , morning , etc.) can have multiple instances (day 1, day 2, or morning 1, morning 2, etc.) 1. Histograms of phone data yield \u00b6 RAPIDS provides two histograms that show the number of time segment instances that had a certain ratio of valid yielded minutes and hours , respectively. A valid yielded minute has at least 1 row of data from any smartphone sensor and a valid yielded hour contains at least M valid minutes. These plots can be used as a rough indication of the smartphone monitoring coverage during a study aggregated across all participants. For example, the figure below shows a valid yielded minutes histogram for daily segments and we can infer that the monitoring coverage was very good since almost all segments contain at least 90 to 100% of the expected sensed minutes. Example Click here to see an example of these interactive visualizations in HTML format Histogram of the data yielded minute ratio for a single participant during five time segments (daily, afternoon, evening, and night) 2. Heatmaps of overall data yield \u00b6 These heatmaps are a break down per time segment and per participant of Visualization 1 . Heatmap\u2019s rows represent participants, columns represent time segment instances and the cells\u2019 color represent the valid yielded minute or hour ratio for a participant during a time segment instance. As different participants might join a study on different dates and time segments can be of any length and start on any day, the x-axis is labelled with the time delta between the start of each time segment instance minus the start of the first instance. These plots provide a quick study overview of the monitoring coverage per person and per time segment. 
The figure below shows the heatmap of the valid yielded minute ratio for participants t01 and t02 on daily segments and, as we inferred from the previous histogram, the lighter (yellow) color on most time segment instances (cells) indicates that both phones sensed data without interruptions for most days (except for the first and last ones). Example Click here to see an example of these interactive visualizations in HTML format Overall compliance heatmap for all participants 3. Heatmap of recorded phone sensors \u00b6 In these heatmaps, rows represent time segment instances, columns represent minutes since the start of a time segment instance, and cells\u2019 color shows the number of phone sensors that logged at least one row of data during those 1-minute windows. RAPIDS creates a plot per participant and per time segment, which can be used as a rough indication of whether time-based sensors were following their sensing schedule (e.g. if location was being sensed every 2 minutes). The figure below shows this heatmap for phone sensors collected by participant t01 in daily time segments from Apr 23rd 2020 to May 4th 2020. We can infer that for most of the monitoring time, the participant\u2019s phone logged data from at least 8 sensors each minute. Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the recorded phone sensors per minute and per time segment of a single participant 4. Heatmap of sensor row count \u00b6 These heatmaps are a per-sensor breakdown of Visualization 1 and Visualization 2 . Note that the second row (ratio of valid yielded minutes) of this heatmap matches the respective participant (bottom) row of the screenshot in Visualization 2. In these heatmaps, rows represent phone or Fitbit sensors, columns represent time segment instances, and cells\u2019 color shows the normalized (0 to 1) row count of each sensor within a time segment instance. RAPIDS creates one heatmap per participant and they can be used to judge missing data on a per-participant and per-sensor basis. The figure below shows data for 16 phone sensors (including data yield) of t01\u2019s daily segments (only half of the sensor names and dates are visible in the screenshot but all can be accessed in the interactive plot). From the top two rows, we can see that the phone was sensing data for most of the monitoring period (as suggested by Figure 3 and Figure 4). We can also infer how phone usage influenced the different sensor streams; there are peaks of screen events during the first day (Apr 23rd), peaks of location coordinates on Apr 26th and Apr 30th, and no sent or received SMS except for Apr 23rd, Apr 29th and Apr 30th (unlabeled row between screen and locations). Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the sensor row count per time segment of a single participant","title":"Data Quality"},{"location":"visualizations/data-quality-visualizations/#data-quality-visualizations","text":"We showcase these visualizations with a test study that collected 14 days of smartphone and Fitbit data from two participants (t01 and t02) and extracted behavioral features within five time segments (daily, morning, afternoon, evening, and night). Note Time segments (e.g. daily , morning , etc.)
can have multiple instances (day 1, day 2, or morning 1, morning 2, etc.)","title":"Data Quality Visualizations"},{"location":"visualizations/data-quality-visualizations/#1-histograms-of-phone-data-yield","text":"RAPIDS provides two histograms that show the number of time segment instances that had a certain ratio of valid yielded minutes and hours , respectively. A valid yielded minute has at least 1 row of data from any smartphone sensor and a valid yielded hour contains at least M valid minutes. These plots can be used as a rough indication of the smartphone monitoring coverage during a study aggregated across all participants. For example, the figure below shows a valid yielded minutes histogram for daily segments and we can infer that the monitoring coverage was very good since almost all segments contain at least 90 to 100% of the expected sensed minutes. Example Click here to see an example of these interactive visualizations in HTML format Histogram of the data yielded minute ratio for a single participant during five time segments (daily, afternoon, evening, and night)","title":"1. Histograms of phone data yield"},{"location":"visualizations/data-quality-visualizations/#2-heatmaps-of-overall-data-yield","text":"These heatmaps are a break down per time segment and per participant of Visualization 1 . Heatmap\u2019s rows represent participants, columns represent time segment instances and the cells\u2019 color represent the valid yielded minute or hour ratio for a participant during a time segment instance. As different participants might join a study on different dates and time segments can be of any length and start on any day, the x-axis is labelled with the time delta between the start of each time segment instance minus the start of the first instance. These plots provide a quick study overview of the monitoring coverage per person and per time segment. The figure below shows the heatmap of the valid yielded minute ratio for participants t01 and t02 on daily segments and, as we inferred from the previous histogram, the lighter (yellow) color on most time segment instances (cells) indicate both phones sensed data without interruptions for most days (except for the first and last ones). Example Click here to see an example of these interactive visualizations in HTML format Overall compliance heatmap for all participants","title":"2. Heatmaps of overall data yield"},{"location":"visualizations/data-quality-visualizations/#3-heatmap-of-recorded-phone-sensors","text":"In these heatmaps rows represent time segment instances, columns represent minutes since the start of a time segment instance, and cells\u2019 color shows the number of phone sensors that logged at least one row of data during those 1-minute windows. RAPIDS creates a plot per participant and per time segment and can be used as a rough indication of whether time-based sensors were following their sensing schedule (e.g. if location was being sensed every 2 minutes). The figure below shows this heatmap for phone sensors collected by participant t01 in daily time segments from Apr 23 rd 2020 to May 4 th 2020. We can infer that for most of the monitoring time, the participant\u2019s phone logged data from at least 8 sensors each minute. Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the recorded phone sensors per minute and per time segment of a single participant","title":"3. 
Heatmap of recorded phone sensors"},{"location":"visualizations/data-quality-visualizations/#4-heatmap-of-sensor-row-count","text":"These heatmaps are a per-sensor breakdown of Visualization 1 and Visualization 2 . Note that the second row (ratio of valid yielded minutes) of this heatmap matches the respective participant (bottom) row the screenshot in Visualization 2. In these heatmaps rows represent phone or Fitbit sensors, columns represent time segment instances and cell\u2019s color shows the normalized (0 to 1) row count of each sensor within a time segment instance. RAPIDS creates one heatmap per participant and they can be used to judge missing data on a per participant and per sensor basis. The figure below shows data for 16 phone sensors (including data yield) of t01\u2019s daily segments (only half of the sensor names and dates are visible in the screenshot but all can be accessed in the interactive plot). From the top two rows, we can see that the phone was sensing data for most of the monitoring period (as suggested by Figure 3 and Figure 4). We can also infer how phone usage influenced the different sensor streams; there are peaks of screen events during the first day (Apr 23 rd ), peaks of location coordinates on Apr 26 th and Apr 30 th , and no sent or received SMS except for Apr 23 rd , Apr 29 th and Apr 30 th (unlabeled row between screen and locations). Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the sensor row count per time segment of a single participant","title":"4. Heatmap of sensor row count"},{"location":"visualizations/feature-visualizations/","text":"Feature Visualizations \u00b6 1. Heatmap Correlation Matrix \u00b6 Columns and rows are the behavioral features computed in RAPIDS, cells\u2019 color represents the correlation coefficient between all days of data for every pair of features of all participants. The user can specify a minimum number of observations ( time segment instances) required to compute the correlation between two features using the MIN_ROWS_RATIO parameter (0.5 by default) and the correlation method (Pearson, Spearman or Kendall) with the CORR_METHOD parameter. In addition, this plot can be configured to only display correlation coefficients above a threshold using the CORR_THRESHOLD parameter (0.1 by default). Example Click here to see an example of these interactive visualizations in HTML format Correlation matrix heatmap for all the features of all participants","title":"Features"},{"location":"visualizations/feature-visualizations/#feature-visualizations","text":"","title":"Feature Visualizations"},{"location":"visualizations/feature-visualizations/#1-heatmap-correlation-matrix","text":"Columns and rows are the behavioral features computed in RAPIDS, cells\u2019 color represents the correlation coefficient between all days of data for every pair of features of all participants. The user can specify a minimum number of observations ( time segment instances) required to compute the correlation between two features using the MIN_ROWS_RATIO parameter (0.5 by default) and the correlation method (Pearson, Spearman or Kendall) with the CORR_METHOD parameter. In addition, this plot can be configured to only display correlation coefficients above a threshold using the CORR_THRESHOLD parameter (0.1 by default). Example Click here to see an example of these interactive visualizations in HTML format Correlation matrix heatmap for all the features of all participants","title":"1. 
Heatmap Correlation Matrix"},{"location":"workflow-examples/analysis/","text":"Analysis Workflow Example \u00b6 TL;DR In addition to using RAPIDS to extract behavioral features and create plots, you can structure your data analysis within RAPIDS (i.e. cleaning your features and creating ML/statistical models) We include an analysis example in RAPIDS that covers raw data processing, cleaning, feature extraction, machine learning modeling, and evaluation Use this example as a guide to structure your own analysis within RAPIDS RAPIDS analysis workflows are compatible with your favorite data science tools and libraries RAPIDS analysis workflows are reproducible and we encourage you to publish them along with your research papers Why should I integrate my analysis in RAPIDS? \u00b6 Even though the bulk of RAPIDS\u2019 current functionality is related to the computation of behavioral features, we recommend RAPIDS as a complementary tool to create a mobile data analysis workflow. This is because the cookiecutter data science file organization guidelines, the use of Snakemake, the provided behavioral features, and the reproducible R and Python development environments allow researchers to divide an analysis workflow into small parts that can be audited, shared in an online repository, reproduced on other computers, and understood by other people as they follow a familiar and consistent structure. We believe these advantages outweigh the time needed to learn how to create these workflows in RAPIDS. We clarify that to create analysis workflows in RAPIDS, researchers can still use any data manipulation tools, editors, libraries or languages they are already familiar with. RAPIDS is meant to be the final destination of analysis code that was developed in interactive notebooks or stand-alone scripts. For example, a user can compute call and location features using RAPIDS; then, they can use Jupyter notebooks to explore feature cleaning approaches and, once the cleaning code is final, it can be moved to RAPIDS as a new step in the pipeline. In turn, the output of this cleaning step can be used to explore machine learning models and once a model is finished, it can also be transferred to RAPIDS as a step of its own. The idea is that when it is time to publish a piece of research, a RAPIDS workflow can be shared in a public repository as is. In the following sections we share an example of how we structured an analysis workflow in RAPIDS. Analysis workflow structure \u00b6 To accurately reflect the complexity of a real-world modeling scenario, we decided not to oversimplify this example. Importantly, every step in this example follows a basic structure: an input file and parameters are manipulated by an R or Python script that saves the results to an output file. Input files, parameters, output files and scripts are grouped into Snakemake rules that are described in smk files in the rules folder (we point the reader to the relevant rule(s) of each step). Researchers can use these rules and scripts as a guide to create their own, as it is expected that every modeling project will have different requirements, data and goals, but ultimately most follow a similar chained pattern. Hint The example\u2019s config file is example_profile/example_config.yaml and its Snakefile is in example_profile/Snakefile . The config file is already configured to process the sensor data as explained in Analysis workflow modules .
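To make the input-script-output structure described above concrete, here is a minimal sketch of what one of these Snakemake rules could look like; the rule name, file paths, and parameter are hypothetical placeholders rather than RAPIDS' actual rules:
rule clean_sensor_features:
    input:
        # features previously merged for one participant ({pid} is a wildcard)
        "data/processed/features/{pid}/all_sensor_features.csv"
    params:
        # exposed in the config so the threshold can change without editing code
        min_valid_hours_per_day = 18
    output:
        "data/processed/features/{pid}/all_sensor_features_cleaned.csv"
    script:
        # an R (or Python) script that reads the input, applies the parameters, and writes the output
        "../src/models/clean_sensor_features.R"
Snakemake reruns this script (and only this script) whenever its input file or parameters change, which is what produces the chained, incremental pattern described above.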
Description of the study modeled in our analysis workflow example \u00b6 Our example is based on a hypothetical study that recruited 2 participants that underwent surgery and collected mobile data for at least one week before and one week after the procedure. Participants wore a Fitbit device and installed the AWARE client in their personal Android and iOS smartphones to collect mobile data 24/7. In addition, participants completed daily severity ratings of 12 common symptoms on a scale from 0 to 10 that we summed up into a daily symptom burden score. The goal of this workflow is to find out if we can predict the daily symptom burden score of a participant. Thus, we framed this question as a binary classification problem with two classes, high and low symptom burden based on the scores above and below average of each participant. We also want to compare the performance of individual (personalized) models vs a population model. In total, our example workflow has nine steps that are in charge of sensor data preprocessing, feature extraction, feature cleaning, machine learning model training and model evaluation (see figure below). We ship this workflow with RAPIDS and share a database with test data in an Open Science Framework repository. Modules of RAPIDS example workflow, from raw data to model evaluation Configure and run the analysis workflow example \u00b6 Install RAPIDS Configure the user credentials of a local or remote MySQL server with writing permissions in your .env file. The example config file is at example_profile/example_config.yaml . Unzip the test database to data/external/rapids_example.sql and run: ./rapids -j1 restore_sql_file --profile example_profile Create the participant files for this example by running: ./rapids -j1 create_example_participant_files Run the example pipeline with: ./rapids -j1 --profile example_profile Modules of our analysis workflow example \u00b6 1. Feature extraction We extract daily behavioral features for data yield, received and sent messages, missed, incoming and outgoing calls, resample fused location data using Doryab provider, activity recognition, battery, Bluetooth, screen, light, applications foreground, conversations, Wi-Fi connected, Wi-Fi visible, Fitbit heart rate summary and intraday data, Fitbit sleep summary data, and Fitbit step summary and intraday data without excluding sleep periods with an active bout threshold of 10 steps. In total, we obtained 237 daily sensor features over 12 days per participant. 2. Extract demographic data. It is common to have demographic data in addition to mobile and target (ground truth) data. In this example we include participants\u2019 age, gender and the number of days they spent in hospital after their surgery as features in our model. We extract these three columns from the participant_info table of our test database . As these three features remain the same within participants, they are used only on the population model. Refer to the demographic_features rule in rules/models.smk . 3. Create target labels. The two classes for our machine learning binary classification problem are high and low symptom burden. Target values are already stored in the participant_target table of our test database and transferred to a CSV file. A new rule/script can be created if further manipulation is necessary. Refer to the parse_targets rule in rules/models.smk . 4. Feature merging. 
These daily features are stored in a CSV file per sensor, a CSV file per participant, and a CSV file including all features from all participants (in every case each column represents a feature and each row represents a day). Refer to the merge_sensor_features_for_individual_participants and merge_features_for_population_model rules in rules/features.smk . 5. Data visualization. At this point the user can use the five plots RAPIDS provides (or implement new ones) to explore and understand the quality of the raw data and extracted features and decide what sensors, days, or participants to include and exclude. Refer to rules/reports.smk to find the rules that generate these plots. 6. Feature cleaning. In this stage we perform four steps to clean our sensor feature file. First, we discard days with a data yield hour ratio less than or equal to 0.75, i.e. we include days with at least 18 hours of data. Second, we drop columns (features) with more than 30% of missing rows. Third, we drop columns with zero variance. Fourth, we drop rows (days) with more than 30% of missing columns (features). In this cleaning stage several parameters are created and exposed in example_profile/example_config.yaml . After this step, we kept 162 features over 11 days for the individual model of p01, 107 features over 12 days for the individual model of p02 and 101 features over 20 days for the population model. Note that the difference in the number of features between p01 and p02 is mostly due to iOS restrictions that stop researchers from collecting the same number of sensors as on Android phones. Feature cleaning for the individual models is done in the clean_sensor_features_for_individual_participants rule and for the population model in the clean_sensor_features_for_all_participants rule in rules/models.smk . 7. Merge features and targets. In this step we merge the cleaned features and target labels for our individual models in the merge_features_and_targets_for_individual_model rule in rules/models.smk . Additionally, we merge the cleaned features, target labels, and demographic features of our two participants for the population model in the merge_features_and_targets_for_population_model rule in rules/models.smk . These two merged files are the input for our individual and population models. 8. Modelling. This stage has three phases: model building, training and evaluation. In the building phase we impute, normalize and oversample our dataset. Missing numeric values in each column are imputed with their mean and we impute missing categorical values with their mode. We normalize each numeric column with one of three strategies (min-max, z-score, and scikit-learn package\u2019s robust scaler) and we one-hot encode each categorical feature as a numerical array. We oversample our imbalanced dataset using SMOTE (Synthetic Minority Over-sampling Technique) or a Random Over sampler from scikit-learn. All these parameters are exposed in example_profile/example_config.yaml . In the training phase, we create eight models: logistic regression, k-nearest neighbors, support vector machine, decision tree, random forest, gradient boosting classifier, extreme gradient boosting classifier and a light gradient boosting machine. We cross-validate each model with an inner cycle to tune hyper-parameters based on the Macro F1 score and an outer cycle to predict the test set on a model with the best hyper-parameters. Both cross-validation cycles use a leave-one-out strategy.
Parameters for each model like weights and learning rates are exposed in example_profile/example_config.yaml . Finally, in the evaluation phase we compute the accuracy, Macro F1, kappa, area under the curve and per-class precision, recall and F1 score of all folds of the outer cross-validation cycle. Refer to the modelling_for_individual_participants rule for the individual modeling and to the modelling_for_all_participants rule for the population modeling, both in rules/models.smk . 9. Compute model baselines. We create three baselines to evaluate our classification models. First, a majority classifier that labels each test sample with the majority class of our training data. Second, a random weighted classifier that predicts each test observation sampling at random from a binomial distribution based on the ratio of our target labels. Third, a decision tree classifier based solely on the demographic features of each participant. As we do not have demographic features for the individual models, this baseline is only available for the population model. Our baseline metrics (e.g. accuracy, precision, etc.) are saved into a CSV file, ready to be compared to our modeling results. Refer to the baselines_for_individual_model rule for the individual model baselines and to the baselines_for_population_model rule for the population model baselines, both in rules/models.smk .","title":"Analysis"},{"location":"workflow-examples/analysis/#analysis-workflow-example","text":"TL;DR In addition to using RAPIDS to extract behavioral features and create plots, you can structure your data analysis within RAPIDS (i.e. cleaning your features and creating ML/statistical models) We include an analysis example in RAPIDS that covers raw data processing, cleaning, feature extraction, machine learning modeling, and evaluation Use this example as a guide to structure your own analysis within RAPIDS RAPIDS analysis workflows are compatible with your favorite data science tools and libraries RAPIDS analysis workflows are reproducible and we encourage you to publish them along with your research papers","title":"Analysis Workflow Example"},{"location":"workflow-examples/analysis/#why-should-i-integrate-my-analysis-in-rapids","text":"Even though the bulk of RAPIDS\u2019 current functionality is related to the computation of behavioral features, we recommend RAPIDS as a complementary tool to create a mobile data analysis workflow. This is because the cookiecutter data science file organization guidelines, the use of Snakemake, the provided behavioral features, and the reproducible R and Python development environments allow researchers to divide an analysis workflow into small parts that can be audited, shared in an online repository, reproduced on other computers, and understood by other people as they follow a familiar and consistent structure. We believe these advantages outweigh the time needed to learn how to create these workflows in RAPIDS. We clarify that to create analysis workflows in RAPIDS, researchers can still use any data manipulation tools, editors, libraries or languages they are already familiar with. RAPIDS is meant to be the final destination of analysis code that was developed in interactive notebooks or stand-alone scripts. For example, a user can compute call and location features using RAPIDS; then, they can use Jupyter notebooks to explore feature cleaning approaches and, once the cleaning code is final, it can be moved to RAPIDS as a new step in the pipeline.
In turn, the output of this cleaning step can be used to explore machine learning models and once a model is finished, it can also be transferred to RAPIDS as a step of its own. The idea is that when it is time to publish a piece of research, a RAPIDS workflow can be shared in a public repository as is. In the following sections we share an example of how we structured an analysis workflow in RAPIDS.","title":"Why should I integrate my analysis in RAPIDS?"},{"location":"workflow-examples/analysis/#analysis-workflow-structure","text":"To accurately reflect the complexity of a real-world modeling scenario, we decided not to oversimplify this example. Importantly, every step in this example follows a basic structure: an input file and parameters are manipulated by an R or Python script that saves the results to an output file. Input files, parameters, output files and scripts are grouped into Snakemake rules that are described on smk files in the rules folder (we point the reader to the relevant rule(s) of each step). Researchers can use these rules and scripts as a guide to create their own as it is expected every modeling project will have different requirements, data and goals but ultimately most follow a similar chainned pattern. Hint The example\u2019s config file is example_profile/example_config.yaml and its Snakefile is in example_profile/Snakefile . The config file is already configured to process the sensor data as explained in Analysis workflow modules .","title":"Analysis workflow structure"},{"location":"workflow-examples/analysis/#description-of-the-study-modeled-in-our-analysis-workflow-example","text":"Our example is based on a hypothetical study that recruited 2 participants that underwent surgery and collected mobile data for at least one week before and one week after the procedure. Participants wore a Fitbit device and installed the AWARE client in their personal Android and iOS smartphones to collect mobile data 24/7. In addition, participants completed daily severity ratings of 12 common symptoms on a scale from 0 to 10 that we summed up into a daily symptom burden score. The goal of this workflow is to find out if we can predict the daily symptom burden score of a participant. Thus, we framed this question as a binary classification problem with two classes, high and low symptom burden based on the scores above and below average of each participant. We also want to compare the performance of individual (personalized) models vs a population model. In total, our example workflow has nine steps that are in charge of sensor data preprocessing, feature extraction, feature cleaning, machine learning model training and model evaluation (see figure below). We ship this workflow with RAPIDS and share a database with test data in an Open Science Framework repository. Modules of RAPIDS example workflow, from raw data to model evaluation","title":"Description of the study modeled in our analysis workflow example"},{"location":"workflow-examples/analysis/#configure-and-run-the-analysis-workflow-example","text":"Install RAPIDS Configure the user credentials of a local or remote MySQL server with writing permissions in your .env file. The example config file is at example_profile/example_config.yaml . 
Unzip the test database to data/external/rapids_example.sql and run: ./rapids -j1 restore_sql_file --profile example_profile Create the participant files for this example by running: ./rapids -j1 create_example_participant_files Run the example pipeline with: ./rapids -j1 --profile example_profile","title":"Configure and run the analysis workflow example"},{"location":"workflow-examples/analysis/#modules-of-our-analysis-workflow-example","text":"1. Feature extraction We extract daily behavioral features for data yield, received and sent messages, missed, incoming and outgoing calls, resample fused location data using Doryab provider, activity recognition, battery, Bluetooth, screen, light, applications foreground, conversations, Wi-Fi connected, Wi-Fi visible, Fitbit heart rate summary and intraday data, Fitbit sleep summary data, and Fitbit step summary and intraday data without excluding sleep periods with an active bout threshold of 10 steps. In total, we obtained 237 daily sensor features over 12 days per participant. 2. Extract demographic data. It is common to have demographic data in addition to mobile and target (ground truth) data. In this example we include participants\u2019 age, gender and the number of days they spent in hospital after their surgery as features in our model. We extract these three columns from the participant_info table of our test database . As these three features remain the same within participants, they are used only on the population model. Refer to the demographic_features rule in rules/models.smk . 3. Create target labels. The two classes for our machine learning binary classification problem are high and low symptom burden. Target values are already stored in the participant_target table of our test database and transferred to a CSV file. A new rule/script can be created if further manipulation is necessary. Refer to the parse_targets rule in rules/models.smk . 4. Feature merging. These daily features are stored on a CSV file per sensor, a CSV file per participant, and a CSV file including all features from all participants (in every case each column represents a feature and each row represents a day). Refer to the merge_sensor_features_for_individual_participants and merge_features_for_population_model rules in rules/features.smk . 5. Data visualization. At this point the user can use the five plots RAPIDS provides (or implement new ones) to explore and understand the quality of the raw data and extracted features and decide what sensors, days, or participants to include and exclude. Refer to rules/reports.smk to find the rules that generate these plots. 6. Feature cleaning. In this stage we perform four steps to clean our sensor feature file. First, we discard days with a data yield hour ratio less than or equal to 0.75, i.e. we include days with at least 18 hours of data. Second, we drop columns (features) with more than 30% of missing rows. Third, we drop columns with zero variance. Fourth, we drop rows (days) with more than 30% of missing columns (features). In this cleaning stage several parameters are created and exposed in example_profile/example_config.yaml . After this step, we kept 162 features over 11 days for the individual model of p01, 107 features over 12 days for the individual model of p02 and 101 features over 20 days for the population model. 
Note that the difference in the number of features between p01 and p02 is mostly due to iOS restrictions that stops researchers from collecting the same number of sensors than in Android phones. Feature cleaning for the individual models is done in the clean_sensor_features_for_individual_participants rule and for the population model in the clean_sensor_features_for_all_participants rule in rules/models.smk . 7. Merge features and targets. In this step we merge the cleaned features and target labels for our individual models in the merge_features_and_targets_for_individual_model rule in rules/models.smk . Additionally, we merge the cleaned features, target labels, and demographic features of our two participants for the population model in the merge_features_and_targets_for_population_model rule in rules/models.smk . These two merged files are the input for our individual and population models. 8. Modelling. This stage has three phases: model building, training and evaluation. In the building phase we impute, normalize and oversample our dataset. Missing numeric values in each column are imputed with their mean and we impute missing categorical values with their mode. We normalize each numeric column with one of three strategies (min-max, z-score, and scikit-learn package\u2019s robust scaler) and we one-hot encode each categorial feature as a numerical array. We oversample our imbalanced dataset using SMOTE (Synthetic Minority Over-sampling Technique) or a Random Over sampler from scikit-learn. All these parameters are exposed in example_profile/example_config.yaml . In the training phase, we create eight models: logistic regression, k-nearest neighbors, support vector machine, decision tree, random forest, gradient boosting classifier, extreme gradient boosting classifier and a light gradient boosting machine. We cross-validate each model with an inner cycle to tune hyper-parameters based on the Macro F1 score and an outer cycle to predict the test set on a model with the best hyper-parameters. Both cross-validation cycles use a leave-one-out strategy. Parameters for each model like weights and learning rates are exposed in example_profile/example_config.yaml . Finally, in the evaluation phase we compute the accuracy, Macro F1, kappa, area under the curve and per class precision, recall and F1 score of all folds of the outer cross-validation cycle. Refer to the modelling_for_individual_participants rule for the individual modeling and to the modelling_for_all_participants rule for the population modeling, both in rules/models.smk . 9. Compute model baselines. We create three baselines to evaluate our classification models. First, a majority classifier that labels each test sample with the majority class of our training data. Second, a random weighted classifier that predicts each test observation sampling at random from a binomial distribution based on the ratio of our target labels. Third, a decision tree classifier based solely on the demographic features of each participant. As we do not have demographic features for individual model, this baseline is only available for population model. Our baseline metrics (e.g. accuracy, precision, etc.) are saved into a CSV file, ready to be compared to our modeling results. 
Refer to the baselines_for_individual_model rule for the individual model baselines and to the baselines_for_population_model rule for the population model baselines, both in rules/models.smk .","title":"Modules of our analysis workflow example"},{"location":"workflow-examples/minimal/","text":"Minimal Working Example \u00b6 This is a quick guide for creating and running a simple pipeline to extract missing, outgoing, and incoming call features for daily and night epochs of one participant monitored on the US East coast. Install RAPIDS and make sure your conda environment is active (see Installation ) Make the changes listed below for the corresponding Configuration step (we provide an example of what the relevant sections in your config.yaml will look like after you are done) Things to change on each configuration step 1. Set up your database connection credentials in .env . We assume your credentials group is called MY_GROUP . 2. America/New_York should be the default timezone 3. Create a participant file p01.yaml based on one of your participants and add p01 to [PIDS] in config.yaml . The following would be the content of your p01.yaml participant file: PHONE : DEVICE_IDS : [ aaaaaaaa-1111-bbbb-2222-cccccccccccc ] # your participant's AWARE device id PLATFORMS : [ android ] # or ios LABEL : MyTestP01 # any string START_DATE : 2020-01-01 # this can also be empty END_DATE : 2021-01-01 # this can also be empty 4. [TIME_SEGMENTS][TYPE] should be the default PERIODIC . Change [TIME_SEGMENTS][FILE] to the path of a file containing the following lines: label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 5. If you collected data with AWARE, you won\u2019t need to modify the attributes of [DEVICE_DATA][PHONE] 6. Set [PHONE_CALLS][PROVIDERS][RAPIDS][COMPUTE] to True Example of the config.yaml sections after the changes outlined above PIDS: [p01] TIMEZONE: &timezone America/New_York DATABASE_GROUP: &database_group MY_GROUP # ... other irrelevant sections TIME_SEGMENTS: &time_segments TYPE: PERIODIC FILE: \"data/external/timesegments_periodic.csv\" # make sure the three lines specified above are in the file INCLUDE_PAST_PERIODIC_SEGMENTS: FALSE # No need to change this if you collected AWARE data on a database and your credentials are grouped under `MY_GROUP` in `.env` DEVICE_DATA: PHONE: SOURCE: TYPE: DATABASE DATABASE_GROUP: *database_group DEVICE_ID_COLUMN: device_id # column name TIMEZONE: TYPE: SINGLE # SINGLE or MULTIPLE VALUE: *timezone ############## PHONE ########################################################### ################################################################################ # ... other irrelevant sections # Communication call features config, TYPES and FEATURES keys need to match PHONE_CALLS: TABLE: calls # change if your calls table has a different name PROVIDERS: RAPIDS: COMPUTE: True # set this to True! CALL_TYPES: ... Run RAPIDS ./rapids -j1 The call features for daily and night time segments will be in /data/processed/features/p01/phone_calls.csv","title":"Minimal"},{"location":"workflow-examples/minimal/#minimal-working-example","text":"This is a quick guide for creating and running a simple pipeline to extract missing, outgoing, and incoming call features for daily and night epochs of one participant monitored on the US East coast.
Install RAPIDS and make sure your conda environment is active (see Installation ) Make the changes listed below for the corresponding Configuration step (we provide an example of what the relevant sections in your config.yaml will look like after you are done) Things to change on each configuration step 1. Set up your database connection credentials in .env . We assume your credentials group is called MY_GROUP . 2. America/New_York should be the default timezone 3. Create a participant file p01.yaml based on one of your participants and add p01 to [PIDS] in config.yaml . The following would be the content of your p01.yaml participant file: PHONE : DEVICE_IDS : [ aaaaaaaa-1111-bbbb-2222-cccccccccccc ] # your participant's AWARE device id PLATFORMS : [ android ] # or ios LABEL : MyTestP01 # any string START_DATE : 2020-01-01 # this can also be empty END_DATE : 2021-01-01 # this can also be empty 4. [TIME_SEGMENTS][TYPE] should be the default PERIODIC . Change [TIME_SEGMENTS][FILE] to the path of a file containing the following lines: label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 5. If you collected data with AWARE, you won\u2019t need to modify the attributes of [DEVICE_DATA][PHONE] 6. Set [PHONE_CALLS][PROVIDERS][RAPIDS][COMPUTE] to True Example of the config.yaml sections after the changes outlined above PIDS: [p01] TIMEZONE: &timezone America/New_York DATABASE_GROUP: &database_group MY_GROUP # ... other irrelevant sections TIME_SEGMENTS: &time_segments TYPE: PERIODIC FILE: \"data/external/timesegments_periodic.csv\" # make sure the three lines specified above are in the file INCLUDE_PAST_PERIODIC_SEGMENTS: FALSE # No need to change this if you collected AWARE data on a database and your credentials are grouped under `MY_GROUP` in `.env` DEVICE_DATA: PHONE: SOURCE: TYPE: DATABASE DATABASE_GROUP: *database_group DEVICE_ID_COLUMN: device_id # column name TIMEZONE: TYPE: SINGLE # SINGLE or MULTIPLE VALUE: *timezone ############## PHONE ########################################################### ################################################################################ # ... other irrelevant sections # Communication call features config, TYPES and FEATURES keys need to match PHONE_CALLS: TABLE: calls # change if your calls table has a different name PROVIDERS: RAPIDS: COMPUTE: True # set this to True! CALL_TYPES: ... Run RAPIDS ./rapids -j1 The call features for daily and night time segments will be in /data/processed/features/p01/phone_calls.csv","title":"Minimal Working Example"}]} \ No newline at end of file +{"config":{"lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Welcome to RAPIDS documentation \u00b6 Reproducible Analysis Pipeline for Data Streams (RAPIDS) allows you to process smartphone and wearable data to extract and create behavioral features (a.k.a. digital biomarkers), visualize mobile sensor data and structure your analysis into reproducible workflows. RAPIDS is open source, documented, modular, tested, and reproducible. At the moment we support smartphone data collected with AWARE and wearable data from Fitbit devices. Tip Questions or feedback can be posted on the #rapids channel in AWARE Framework's slack . Bugs and feature requests should be posted on GitHub . Join our discussions on our algorithms and assumptions for feature processing . Ready to start?
Go to Installation , then to Configuration , and then to Execution How does it work? \u00b6 RAPIDS is formed by R and Python scripts orchestrated by Snakemake . We suggest you read Snakemake\u2019s docs, but in short: every link in the analysis chain is atomic and has files as input and output. Behavioral features are processed per sensor and per participant. What are the benefits of using RAPIDS? \u00b6 Consistent analysis . Every participant sensor dataset is analyzed in the exact same way and isolated from each other. Efficient analysis . Every analysis step is executed only once. Whenever your data or configuration changes, only the affected files are updated. Parallel execution . Thanks to Snakemake, your analysis can be executed over multiple cores without changing your code. Code-free features . Extract any of the behavioral features offered by RAPIDS without writing any code. Extensible code . You can easily add your own behavioral features in R or Python, share them with the community, and keep authorship and citations. Timezone aware . Your data is adjusted to the specified timezone (multiple-timezone support coming soon ). Flexible time segments . You can extract behavioral features on time windows of any length (e.g. 5 minutes, 3 hours, 2 days), on every day or particular days (e.g. weekends, Mondays, the 1st of each month, etc.) or around events of interest (e.g. surveys or clinical relapses). Tested code . We are constantly adding tests to make sure our behavioral features are correct. Reproducible code . If you structure your analysis within RAPIDS, you can be sure your code will run on other computers as intended thanks to R and Python virtual environments. You can share your analysis code along with your publications without any overhead. Private . All your data is processed locally. How is it organized? \u00b6 In broad terms, the config.yaml , .env file , participants files , and time segment files are the only ones that you will have to modify. All data is stored in data/ and all scripts are stored in src/ . For more information see RAPIDS\u2019 File Structure .","title":"Home"},{"location":"#welcome-to-rapids-documentation","text":"Reproducible Analysis Pipeline for Data Streams (RAPIDS) allows you to process smartphone and wearable data to extract and create behavioral features (a.k.a. digital biomarkers), visualize mobile sensor data and structure your analysis into reproducible workflows. RAPIDS is open source, documented, modular, tested, and reproducible. At the moment we support smartphone data collected with AWARE and wearable data from Fitbit devices. Tip Questions or feedback can be posted on the #rapids channel in AWARE Framework's slack . Bugs and feature requests should be posted on GitHub . Join our discussions on our algorithms and assumptions for feature processing . Ready to start? Go to Installation , then to Configuration , and then to Execution","title":"Welcome to RAPIDS documentation"},{"location":"#how-does-it-work","text":"RAPIDS is formed by R and Python scripts orchestrated by Snakemake . We suggest you read Snakemake\u2019s docs, but in short: every link in the analysis chain is atomic and has files as input and output. Behavioral features are processed per sensor and per participant.","title":"How does it work?"},{"location":"#what-are-the-benefits-of-using-rapids","text":"Consistent analysis . Every participant sensor dataset is analyzed in the exact same way and isolated from each other. Efficient analysis .
Every analysis step is executed only once. Whenever your data or configuration changes, only the affected files are updated. Parallel execution . Thanks to Snakemake, your analysis can be executed over multiple cores without changing your code. Code-free features . Extract any of the behavioral features offered by RAPIDS without writing any code. Extensible code . You can easily add your own behavioral features in R or Python, share them with the community, and keep authorship and citations. Timezone aware . Your data is adjusted to the specified timezone (multiple-timezone support coming soon ). Flexible time segments . You can extract behavioral features on time windows of any length (e.g. 5 minutes, 3 hours, 2 days), on every day or particular days (e.g. weekends, Mondays, the 1st of each month, etc.) or around events of interest (e.g. surveys or clinical relapses). Tested code . We are constantly adding tests to make sure our behavioral features are correct. Reproducible code . If you structure your analysis within RAPIDS, you can be sure your code will run on other computers as intended thanks to R and Python virtual environments. You can share your analysis code along with your publications without any overhead. Private . All your data is processed locally.","title":"What are the benefits of using RAPIDS?"},{"location":"#how-is-it-organized","text":"In broad terms, the config.yaml , .env file , participants files , and time segment files are the only ones that you will have to modify. All data is stored in data/ and all scripts are stored in src/ . For more information see RAPIDS\u2019 File Structure .","title":"How is it organized?"},{"location":"change-log/","text":"Change Log \u00b6 v0.2.6 \u00b6 Fix old versions banner on nested pages v0.2.5 \u00b6 Fix docs deploy typo v0.2.4 \u00b6 Fix broken links in landing page and docs deploy v0.2.3 \u00b6 Fix participant IDs in the example analysis workflow v0.2.2 \u00b6 Fix readme link to docs v0.2.1 \u00b6 Fix link to the most recent version in the old version banner v0.2.0 \u00b6 Add new PHONE_BLUETOOTH DORYAB provider Deprecate PHONE_BLUETOOTH RAPIDS provider Fix bug in filter_data_by_segment for Python when dataset was empty Minor doc updates New FAQ item v0.1.0 \u00b6 New and more consistent docs (this website).
The previous docs are marked as beta Consolidate configuration instructions Flexible time segments Simplify Fitbit behavioral feature extraction and documentation Sensor\u2019s configuration and output is more consistent Update visualizations to handle flexible day segments Create a RAPIDS execution script that allows re-computation of the pipeline after configuration changes Add citation guide Update virtual environment guide Update analysis workflow example Add a Code of Conduct Update Team page","title":"Change Log"},{"location":"change-log/#change-log","text":"","title":"Change Log"},{"location":"change-log/#v026","text":"Fix old versions banner on nested pages","title":"v0.2.6"},{"location":"change-log/#v025","text":"Fix docs deploy typo","title":"v0.2.5"},{"location":"change-log/#v024","text":"Fix broken links in landing page and docs deploy","title":"v0.2.4"},{"location":"change-log/#v023","text":"Fix participant IDs in the example analysis workflow","title":"v0.2.3"},{"location":"change-log/#v022","text":"Fix readme link to docs","title":"v0.2.2"},{"location":"change-log/#v021","text":"Fix link to the most recent version in the old version banner","title":"v0.2.1"},{"location":"change-log/#v020","text":"Add new PHONE_BLUETOOTH DORYAB provider Deprecate PHONE_BLUETOOTH RAPIDS provider Fix bug in filter_data_by_segment for Python when dataset was empty Minor doc updates New FAQ item","title":"v0.2.0"},{"location":"change-log/#v010","text":"New and more consistent docs (this website). The previous docs are marked as beta Consolidate configuration instructions Flexible time segments Simplify Fitbit behavioral feature extraction and documentation Sensor\u2019s configuration and output is more consistent Update visualizations to handle flexible day segments Create a RAPIDS execution script that allows re-computation of the pipeline after configuration changes Add citation guide Update virtual environment guide Update analysis workflow example Add a Code of Conduct Update Team page","title":"v0.1.0"},{"location":"citation/","text":"Cite RAPIDS and providers \u00b6 RAPIDS and the community RAPIDS is a community effort and as such we want to continue recognizing the contributions from other researchers. Besides citing RAPIDS, we ask you to cite any of the authors listed below if you used those sensor providers in your analysis, thank you! RAPIDS \u00b6 If you used RAPIDS, please cite this paper . RAPIDS et al. citation Vega J, Li M, Aguillera K, Goel N, Joshi E, Durica KC, Kunta AR, Low CA RAPIDS: Reproducible Analysis Pipeline for Data Streams Collected with Mobile Devices JMIR Preprints. 18/08/2020:23246 DOI: 10.2196/preprints.23246 URL: https://preprints.jmir.org/preprint/23246 Panda (accelerometer) \u00b6 If you computed accelerometer features using the provider [PHONE_ACCELEROMETER][PANDA] cite this paper in addition to RAPIDS. Panda et al. citation Panda N, Solsky I, Huang EJ, Lipsitz S, Pradarelli JC, Delisle M, Cusack JC, Gadd MA, Lubitz CC, Mullen JT, Qadan M, Smith BL, Specht M, Stephen AE, Tanabe KK, Gawande AA, Onnela JP, Haynes AB. Using Smartphones to Capture Novel Recovery Metrics After Cancer Surgery. JAMA Surg. 2020 Feb 1;155(2):123-129. doi: 10.1001/jamasurg.2019.4702. PMID: 31657854; PMCID: PMC6820047. Stachl (applications foreground) \u00b6 If you computed applications foreground features using the app category (genre) catalogue in [PHONE_APPLICATIONS_FOREGROUND][RAPIDS] cite this paper in addition to RAPIDS. Stachl et al.
citation Clemens Stachl, Quay Au, Ramona Schoedel, Samuel D. Gosling, Gabriella M. Harari, Daniel Buschek, Sarah Theres V\u00f6lkel, Tobias Schuwerk, Michelle Oldemeier, Theresa Ullmann, Heinrich Hussmann, Bernd Bischl, Markus B\u00fchner. Proceedings of the National Academy of Sciences Jul 2020, 117 (30) 17680-17687; DOI: 10.1073/pnas.1920484117 Doryab (bluetooth) \u00b6 If you computed bluetooth features using the provider [PHONE_BLUETOOTH][DORYAB] cite this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Barnett (locations) \u00b6 If you computed locations features using the provider [PHONE_LOCATIONS][BARNETT] cite this paper and this paper in addition to RAPIDS. Barnett et al. citation Ian Barnett, Jukka-Pekka Onnela, Inferring mobility measures from GPS traces with missing data, Biostatistics, Volume 21, Issue 2, April 2020, Pages e98\u2013e112, https://doi.org/10.1093/biostatistics/kxy059 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845 Doryab (locations) \u00b6 If you computed locations features using the provider [PHONE_LOCATIONS][DORYAB] cite this paper and this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845","title":"Citation"},{"location":"citation/#cite-rapids-and-providers","text":"RAPIDS and the community RAPIDS is a community effort and as such we want to continue recognizing the contributions from other researchers. Besides citing RAPIDS, we ask you to cite any of the authors listed below if you used those sensor providers in your analysis, thank you!","title":"Cite RAPIDS and providers"},{"location":"citation/#rapids","text":"If you used RAPIDS, please cite this paper . RAPIDS et al. citation Vega J, Li M, Aguillera K, Goel N, Joshi E, Durica KC, Kunta AR, Low CA RAPIDS: Reproducible Analysis Pipeline for Data Streams Collected with Mobile Devices JMIR Preprints. 18/08/2020:23246 DOI: 10.2196/preprints.23246 URL: https://preprints.jmir.org/preprint/23246","title":"RAPIDS"},{"location":"citation/#panda-accelerometer","text":"If you computed accelerometer features using the provider [PHONE_ACCLEROMETER][PANDA] cite this paper in addition to RAPIDS. Panda et al. citation Panda N, Solsky I, Huang EJ, Lipsitz S, Pradarelli JC, Delisle M, Cusack JC, Gadd MA, Lubitz CC, Mullen JT, Qadan M, Smith BL, Specht M, Stephen AE, Tanabe KK, Gawande AA, Onnela JP, Haynes AB. 
Using Smartphones to Capture Novel Recovery Metrics After Cancer Surgery. JAMA Surg. 2020 Feb 1;155(2):123-129. doi: 10.1001/jamasurg.2019.4702. PMID: 31657854; PMCID: PMC6820047.","title":"Panda (accelerometer)"},{"location":"citation/#stachl-applications-foreground","text":"If you computed applications foreground features using the app category (genre) catalogue in [PHONE_APPLICATIONS_FOREGROUND][RAPIDS] cite this paper in addition to RAPIDS. Stachl et al. citation Clemens Stachl, Quay Au, Ramona Schoedel, Samuel D. Gosling, Gabriella M. Harari, Daniel Buschek, Sarah Theres V\u00f6lkel, Tobias Schuwerk, Michelle Oldemeier, Theresa Ullmann, Heinrich Hussmann, Bernd Bischl, Markus B\u00fchner. Proceedings of the National Academy of Sciences Jul 2020, 117 (30) 17680-17687; DOI: 10.1073/pnas.1920484117","title":"Stachl (applications foreground)"},{"location":"citation/#doryab-bluetooth","text":"If you computed bluetooth features using the provider [PHONE_BLUETOOTH][DORYAB] cite this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394","title":"Doryab (bluetooth)"},{"location":"citation/#barnett-locations","text":"If you computed locations features using the provider [PHONE_LOCATIONS][BARNETT] cite this paper and this paper in addition to RAPIDS. Barnett et al. citation Ian Barnett, Jukka-Pekka Onnela, Inferring mobility measures from GPS traces with missing data, Biostatistics, Volume 21, Issue 2, April 2020, Pages e98\u2013e112, https://doi.org/10.1093/biostatistics/kxy059 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845","title":"Barnett (locations)"},{"location":"citation/#doryab-locations","text":"If you computed locations features using the provider [PHONE_LOCATIONS][DORYAB] cite this paper and this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845","title":"Doryab (locations)"},{"location":"code_of_conduct/","text":"Contributor Covenant Code of Conduct \u00b6 Our Pledge \u00b6 We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. Our Standards \u00b6 Examples of behavior that contributes to a positive environment for our community include: Demonstrating empathy and kindness toward other people Being respectful of differing opinions, viewpoints, and experiences Giving and gracefully accepting constructive feedback Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: The use of sexualized language or imagery, and sexual attention or advances of any kind Trolling, insulting or derogatory comments, and personal or political attacks Public or private harassment Publishing others\u2019 private information, such as a physical or email address, without their explicit permission Other conduct which could reasonably be considered inappropriate in a professional setting Enforcement Responsibilities \u00b6 Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. Scope \u00b6 This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Enforcement \u00b6 Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at moshi@pitt.edu . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. Enforcement Guidelines \u00b6 Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 1. Correction \u00b6 Community Impact : Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. Consequence : A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 2. Warning \u00b6 Community Impact : A violation through a single incident or series of actions. Consequence : A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 3. Temporary Ban \u00b6 Community Impact : A serious violation of community standards, including sustained inappropriate behavior. 
Consequence : A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 4. Permanent Ban \u00b6 Community Impact : Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. Consequence : A permanent ban from any sort of public interaction within the community. Attribution \u00b6 This Code of Conduct is adapted from the Contributor Covenant , version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html . Community Impact Guidelines were inspired by Mozilla\u2019s code of conduct enforcement ladder . For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq . Translations are available at https://www.contributor-covenant.org/translations .","title":"Code of Conduct"},{"location":"code_of_conduct/#contributor-covenant-code-of-conduct","text":"","title":"Contributor Covenant Code of Conduct"},{"location":"code_of_conduct/#our-pledge","text":"We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.","title":"Our Pledge"},{"location":"code_of_conduct/#our-standards","text":"Examples of behavior that contributes to a positive environment for our community include: Demonstrating empathy and kindness toward other people Being respectful of differing opinions, viewpoints, and experiences Giving and gracefully accepting constructive feedback Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: The use of sexualized language or imagery, and sexual attention or advances of any kind Trolling, insulting or derogatory comments, and personal or political attacks Public or private harassment Publishing others\u2019 private information, such as a physical or email address, without their explicit permission Other conduct which could reasonably be considered inappropriate in a professional setting","title":"Our Standards"},{"location":"code_of_conduct/#enforcement-responsibilities","text":"Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.","title":"Enforcement Responsibilities"},{"location":"code_of_conduct/#scope","text":"This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.","title":"Scope"},{"location":"code_of_conduct/#enforcement","text":"Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at moshi@pitt.edu . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident.","title":"Enforcement"},{"location":"code_of_conduct/#enforcement-guidelines","text":"Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:","title":"Enforcement Guidelines"},{"location":"code_of_conduct/#1-correction","text":"Community Impact : Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. Consequence : A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.","title":"1. Correction"},{"location":"code_of_conduct/#2-warning","text":"Community Impact : A violation through a single incident or series of actions. Consequence : A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.","title":"2. Warning"},{"location":"code_of_conduct/#3-temporary-ban","text":"Community Impact : A serious violation of community standards, including sustained inappropriate behavior. Consequence : A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.","title":"3. Temporary Ban"},{"location":"code_of_conduct/#4-permanent-ban","text":"Community Impact : Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. Consequence : A permanent ban from any sort of public interaction within the community.","title":"4. Permanent Ban"},{"location":"code_of_conduct/#attribution","text":"This Code of Conduct is adapted from the Contributor Covenant , version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html . 
Community Impact Guidelines were inspired by Mozilla\u2019s code of conduct enforcement ladder . For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq . Translations are available at https://www.contributor-covenant.org/translations .","title":"Attribution"},{"location":"faq/","text":"Frequently Asked Questions \u00b6 Cannot connect to your MySQL server \u00b6 Problem **Error in .local ( drv, \\. .. ) :** **Failed to connect to database: Error: Can \\' t initialize character set unknown ( path: compiled \\_ in ) ** : Calls: dbConnect -> dbConnect -> .local -> .Call Execution halted [ Tue Mar 10 19 :40:15 2020 ] Error in rule download_dataset: jobid: 531 output: data/raw/p60/locations_raw.csv RuleException: CalledProcessError in line 20 of /home/ubuntu/rapids/rules/preprocessing.snakefile: Command 'set -euo pipefail; Rscript --vanilla /home/ubuntu/rapids/.snakemake/scripts/tmp_2jnvqs7.download_dataset.R' returned non-zero exit status 1 . File \"/home/ubuntu/rapids/rules/preprocessing.snakefile\" , line 20 , in __rule_download_dataset File \"/home/ubuntu/anaconda3/envs/moshi-env/lib/python3.7/concurrent/futures/thread.py\" , line 57 , in run Shutting down, this might take some time. Exiting because a job execution failed. Look above for error message Solution Please make sure the DATABASE_GROUP in config.yaml matches your DB credentials group in .env . Cannot start mysql in linux via brew services start mysql \u00b6 Problem Cannot start mysql in linux via brew services start mysql Solution Use mysql.server start Every time I run force the download_dataset rule all rules are executed \u00b6 Problem When running snakemake -j1 -R download_phone_data or ./rapids -j1 -R download_phone_data all the rules and files are re-computed Solution This is expected behavior. The advantage of using snakemake under the hood is that every time a file containing data is modified every rule that depends on that file will be re-executed to update their results. In this case, since download_dataset updates all the raw data, and you are forcing the rule with the flag -R every single rule that depends on those raw files will be executed. Error Table XXX doesn't exist while running the download_phone_data or download_fitbit_data rule. \u00b6 Problem Error in .local ( conn, statement, ... ) : could not run statement: Table 'db_name.table_name' doesn ' t exist Calls: colnames ... .local -> dbSendQuery -> dbSendQuery -> .local -> .Call Execution halted Solution Please make sure the sensors listed in [PHONE_VALID_SENSED_BINS][PHONE_SENSORS] and the [TABLE] of each sensor you activated in config.yaml match your database tables. 
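To double-check this, a small script along the following lines can flag mismatches before you run the pipeline. This is only a sketch: it assumes PyYAML is installed, that it is run from the RAPIDS root folder, and that each activated sensor section in config.yaml exposes a TABLE key as described above; the placeholder db_tables set must be replaced with the tables you actually see in your database.

```python
# Sketch: list sensors whose configured TABLE is missing from the database.
import yaml

with open("config.yaml") as f:
    config = yaml.safe_load(f)

# Placeholder: fill in with the table names reported by your database (e.g. SHOW TABLES;).
db_tables = {"calls", "messages", "screen", "battery"}

for sensor, settings in config.items():
    if isinstance(settings, dict) and "TABLE" in settings:
        if settings["TABLE"] not in db_tables:
            print(f"{sensor}: configured table '{settings['TABLE']}' not found in the database")
```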
How do I install RAPIDS on Ubuntu 16.04 \u00b6 Solution Install dependencies (Homebrew - if not installed): sudo apt-get install libmariadb-client-lgpl-dev libxml2-dev libssl-dev Install brew for linux and add the following line to ~/.bashrc : export PATH=$HOME/.linuxbrew/bin:$PATH source ~/.bashrc Install MySQL brew install mysql brew services start mysql Install R, pandoc and rmarkdown: brew install r brew install gcc@6 (needed due to this bug ) HOMEBREW_CC=gcc-6 brew install pandoc Install miniconda using these instructions Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake renv_install snakemake renv_init snakemake renv_restore This step could take several minutes to complete. Please be patient and let it run until completion. mysql.h cannot be found \u00b6 Problem -------------------------- [ ERROR MESSAGE ] ---------------------------- :1:10: fatal error: mysql.h: No such file or directory compilation terminated. ----------------------------------------------------------------------- ERROR: configuration failed for package 'RMySQL' Solution sudo apt install libmariadbclient-dev No package libcurl found \u00b6 Problem libcurl cannot be found Solution Install libcurl sudo apt install libcurl4-openssl-dev Configuration failed because openssl was not found. \u00b6 Problem openssl cannot be found Solution Install openssl sudo apt install libssl-dev Configuration failed because libxml-2.0 was not found \u00b6 Problem libxml-2.0 cannot be found Solution Install libxml-2.0 sudo apt install libxml2-dev SSL connection error when running RAPIDS \u00b6 Problem You are getting the following error message when running RAPIDS: Error: Failed to connect: SSL connection error: error:1425F102:SSL routines:ssl_choose_client_version:unsupported protocol. Solution This is a bug in Ubuntu 20.04 when trying to connect to an old MySQL server with MySQL client 8.0. You should get the same error message if you try to connect from the command line. There you can add the option --ssl-mode=DISABLED but we can't do this from the R connector. If you can't update your server, the quickest solution would be to import your database to another server or to a local environment. Alternatively, you could replace mysql-client and libmysqlclient-dev with mariadb-client and libmariadbclient-dev and reinstall renv. More info about this issue here DB_TABLES key not found \u00b6 Problem If you get the following error KeyError in line 43 of preprocessing.smk: 'PHONE_SENSORS' , it means that the indentation of the key [PHONE_SENSORS] is not matching the other child elements of PHONE_VALID_SENSED_BINS Solution You need to add or remove any leading whitespaces as needed on that line. PHONE_VALID_SENSED_BINS : COMPUTE : False # This flag is automatically ignored (set to True) if you are extracting PHONE_VALID_SENSED_DAYS or screen or Barnett's location features BIN_SIZE : &bin_size 5 # (in minutes) PHONE_SENSORS : [] Error while updating your conda environment in Ubuntu \u00b6 Problem You get the following error: CondaMultiError: CondaVerificationError: The package for tk located at /home/ubuntu/miniconda2/pkgs/tk-8.6.9-hed695b0_1003 appears to be corrupted. The path 'include/mysqlStubs.h' specified in the package manifest cannot be found. ClobberError: This transaction has incompatible packages due to a shared path. 
packages: conda-forge/linux-64::llvm-openmp-10.0.0-hc9558a2_0, anaconda/linux-64::intel-openmp-2019.4-243 path: 'lib/libiomp5.so' Solution Reinstall conda Embedded nul in string \u00b6 Problem You get the following error when downloading sensor data: Error in result_fetch ( res@ptr, n = n ) : embedded nul in string: Solution This problem is due to the way RMariaDB handles a mismatch between data types in R and MySQL (see this issue ). Since it seems this problem won\u2019t be handled by RMariaDB , you have two options: If it\u2019s only a few rows that are causing this problem, remove the null character from the conflicting table cell. If it\u2019s not feasible to modify your data, you can try swapping RMariaDB with RMySQL . Just keep in mind you might have problems connecting to modern MySQL servers running in Linux: Add RMySQL to the renv environment by running the following command in a terminal open on RAPIDS root folder R -e 'renv::install(\"RMySQL\")' Go to src/data/download_phone_data.R and replace library(RMariaDB) with library(RMySQL) In the same file replace dbEngine <- dbConnect(MariaDB(), default.file = \"./.env\", group = group) with dbEngine <- dbConnect(MySQL(), default.file = \"./.env\", group = group)","title":"FAQ"},{"location":"faq/#frequently-asked-questions","text":"","title":"Frequently Asked Questions"},{"location":"faq/#cannot-connect-to-your-mysql-server","text":"Problem **Error in .local ( drv, \\. .. ) :** **Failed to connect to database: Error: Can \\' t initialize character set unknown ( path: compiled \\_ in ) ** : Calls: dbConnect -> dbConnect -> .local -> .Call Execution halted [ Tue Mar 10 19 :40:15 2020 ] Error in rule download_dataset: jobid: 531 output: data/raw/p60/locations_raw.csv RuleException: CalledProcessError in line 20 of /home/ubuntu/rapids/rules/preprocessing.snakefile: Command 'set -euo pipefail; Rscript --vanilla /home/ubuntu/rapids/.snakemake/scripts/tmp_2jnvqs7.download_dataset.R' returned non-zero exit status 1 . File \"/home/ubuntu/rapids/rules/preprocessing.snakefile\" , line 20 , in __rule_download_dataset File \"/home/ubuntu/anaconda3/envs/moshi-env/lib/python3.7/concurrent/futures/thread.py\" , line 57 , in run Shutting down, this might take some time. Exiting because a job execution failed. Look above for error message Solution Please make sure the DATABASE_GROUP in config.yaml matches your DB credentials group in .env .","title":"Cannot connect to your MySQL server"},{"location":"faq/#cannot-start-mysql-in-linux-via-brew-services-start-mysql","text":"Problem Cannot start mysql in linux via brew services start mysql Solution Use mysql.server start","title":"Cannot start mysql in linux via brew services start mysql"},{"location":"faq/#every-time-i-run-force-the-download_dataset-rule-all-rules-are-executed","text":"Problem When running snakemake -j1 -R download_phone_data or ./rapids -j1 -R download_phone_data all the rules and files are re-computed Solution This is expected behavior. The advantage of using snakemake under the hood is that every time a file containing data is modified, every rule that depends on that file will be re-executed to update their results.
In this case, since download_dataset updates all the raw data, and you are forcing the rule with the flag -R every single rule that depends on those raw files will be executed.","title":"Every time I run force the download_dataset rule all rules are executed"},{"location":"faq/#error-table-xxx-doesnt-exist-while-running-the-download_phone_data-or-download_fitbit_data-rule","text":"Problem Error in .local ( conn, statement, ... ) : could not run statement: Table 'db_name.table_name' doesn ' t exist Calls: colnames ... .local -> dbSendQuery -> dbSendQuery -> .local -> .Call Execution halted Solution Please make sure the sensors listed in [PHONE_VALID_SENSED_BINS][PHONE_SENSORS] and the [TABLE] of each sensor you activated in config.yaml match your database tables.","title":"Error Table XXX doesn't exist while running the download_phone_data or download_fitbit_data rule."},{"location":"faq/#how-do-i-install-rapids-on-ubuntu-1604","text":"Solution Install dependencies (Homebrew - if not installed): sudo apt-get install libmariadb-client-lgpl-dev libxml2-dev libssl-dev Install brew for linux and add the following line to ~/.bashrc : export PATH=$HOME/.linuxbrew/bin:$PATH source ~/.bashrc Install MySQL brew install mysql brew services start mysql Install R, pandoc and rmarkdown: brew install r brew install gcc@6 (needed due to this bug ) HOMEBREW_CC=gcc-6 brew install pandoc Install miniconda using these instructions Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake renv_install snakemake renv_init snakemake renv_restore This step could take several minutes to complete. Please be patient and let it run until completion.","title":"How do I install RAPIDS on Ubuntu 16.04"},{"location":"faq/#mysqlh-cannot-be-found","text":"Problem -------------------------- [ ERROR MESSAGE ] ---------------------------- :1:10: fatal error: mysql.h: No such file or directory compilation terminated. ----------------------------------------------------------------------- ERROR: configuration failed for package 'RMySQL' Solution sudo apt install libmariadbclient-dev","title":"mysql.h cannot be found"},{"location":"faq/#no-package-libcurl-found","text":"Problem libcurl cannot be found Solution Install libcurl sudo apt install libcurl4-openssl-dev","title":"No package libcurl found"},{"location":"faq/#configuration-failed-because-openssl-was-not-found","text":"Problem openssl cannot be found Solution Install openssl sudo apt install libssl-dev","title":"Configuration failed because openssl was not found."},{"location":"faq/#configuration-failed-because-libxml-20-was-not-found","text":"Problem libxml-2.0 cannot be found Solution Install libxml-2.0 sudo apt install libxml2-dev","title":"Configuration failed because libxml-2.0 was not found"},{"location":"faq/#ssl-connection-error-when-running-rapids","text":"Problem You are getting the following error message when running RAPIDS: Error: Failed to connect: SSL connection error: error:1425F102:SSL routines:ssl_choose_client_version:unsupported protocol. Solution This is a bug in Ubuntu 20.04 when trying to connect to an old MySQL server with MySQL client 8.0. You should get the same error message if you try to connect from the command line. There you can add the option --ssl-mode=DISABLED but we can't do this from the R connector. 
If you can't update your server, the quickest solution would be to import your database to another server or to a local environment. Alternatively, you could replace mysql-client and libmysqlclient-dev with mariadb-client and libmariadbclient-dev and reinstall renv. More info about this issue here","title":"SSL connection error when running RAPIDS"},{"location":"faq/#db_tables-key-not-found","text":"Problem If you get the following error KeyError in line 43 of preprocessing.smk: 'PHONE_SENSORS' , it means that the indentation of the key [PHONE_SENSORS] is not matching the other child elements of PHONE_VALID_SENSED_BINS Solution You need to add or remove any leading whitespaces as needed on that line. PHONE_VALID_SENSED_BINS : COMPUTE : False # This flag is automatically ignored (set to True) if you are extracting PHONE_VALID_SENSED_DAYS or screen or Barnett's location features BIN_SIZE : &bin_size 5 # (in minutes) PHONE_SENSORS : []","title":"DB_TABLES key not found"},{"location":"faq/#error-while-updating-your-conda-environment-in-ubuntu","text":"Problem You get the following error: CondaMultiError: CondaVerificationError: The package for tk located at /home/ubuntu/miniconda2/pkgs/tk-8.6.9-hed695b0_1003 appears to be corrupted. The path 'include/mysqlStubs.h' specified in the package manifest cannot be found. ClobberError: This transaction has incompatible packages due to a shared path. packages: conda-forge/linux-64::llvm-openmp-10.0.0-hc9558a2_0, anaconda/linux-64::intel-openmp-2019.4-243 path: 'lib/libiomp5.so' Solution Reinstall conda","title":"Error while updating your conda environment in Ubuntu"},{"location":"faq/#embedded-nul-in-string","text":"Problem You get the following error when downloading sensor data: Error in result_fetch ( res@ptr, n = n ) : embedded nul in string: Solution This problem is due to the way RMariaDB handles a mismatch between data types in R and MySQL (see this issue ). Since it seems this problem won\u2019t be handled by RMariaDB , you have two options: If it\u2019s only a few rows that are causing this problem, remove the the null character from the conflictive table cell. If it\u2019s not feasible to modify your data you can try swapping RMariaDB with RMySQL . Just have in mind you might have problems connecting to modern MySQL servers running in Liunx: Add RMySQL to the renv environment by running the following command in a terminal open on RAPIDS root folder R -e 'renv::install(\"RMySQL\")' Go to src/data/download_phone_data.R and replace library(RMariaDB) with library(RMySQL) In the same file replace dbEngine <- dbConnect(MariaDB(), default.file = \"./.env\", group = group) with dbEngine <- dbConnect(MySQL(), default.file = \"./.env\", group = group)","title":"Embedded nul in string"},{"location":"file-structure/","text":"File Structure \u00b6 Tip Read this page if you want to learn more about how RAPIDS is structured. If you want to start using it go to Installation , then to Configuration , and then to Execution All paths mentioned in this page are relative to RAPIDS\u2019 root folder. If you want to extract the behavioral features that RAPIDS offers, you will only have to create or modify the .env file , participants files , time segment files , and the config.yaml file as instructed in the Configuration page . The config.yaml file is the heart of RAPIDS and includes parameters to manage participants, data sources, sensor data, visualizations and more. All data is saved in data/ . 
The data/external/ folder stores any data imported or created by the user, data/raw/ stores sensor data as imported from your database, data/interim/ has intermediate files necessary to compute behavioral features from raw data, and data/processed/ has all the final files with the behavioral features in folders per participant and sensor. RAPIDS source code is saved in src/ . The src/data/ folder stores scripts to download, clean and pre-process sensor data, src/features has scripts to extract behavioral features organized in their respective sensor subfolders , src/models/ can host any script to create models or statistical analyses with the behavioral features you extract, and src/visualization/ has scripts to create plots of the raw and processed data. There are other files and folders but only relevant if you are interested in extending RAPIDS (e.g. virtual env files, docs, tests, Dockerfile, the Snakefile, etc.). In the figure below, we represent the interactions between users and files. After a user modifies the configuration files mentioned above, the Snakefile file will search for and execute the Snakemake rules that contain the Python or R scripts necessary to generate or update the required output files (behavioral features, plots, etc.). Interaction diagram between the user, and important files in RAPIDS","title":"File Structure"},{"location":"file-structure/#file-structure","text":"Tip Read this page if you want to learn more about how RAPIDS is structured. If you want to start using it go to Installation , then to Configuration , and then to Execution All paths mentioned in this page are relative to RAPIDS\u2019 root folder. If you want to extract the behavioral features that RAPIDS offers, you will only have to create or modify the .env file , participants files , time segment files , and the config.yaml file as instructed in the Configuration page . The config.yaml file is the heart of RAPIDS and includes parameters to manage participants, data sources, sensor data, visualizations and more. All data is saved in data/ . The data/external/ folder stores any data imported or created by the user, data/raw/ stores sensor data as imported from your database, data/interim/ has intermediate files necessary to compute behavioral features from raw data, and data/processed/ has all the final files with the behavioral features in folders per participant and sensor. RAPIDS source code is saved in src/ . The src/data/ folder stores scripts to download, clean and pre-process sensor data, src/features has scripts to extract behavioral features organized in their respective sensor subfolders , src/models/ can host any script to create models or statistical analyses with the behavioral features you extract, and src/visualization/ has scripts to create plots of the raw and processed data. There are other files and folders but only relevant if you are interested in extending RAPIDS (e.g. virtual env files, docs, tests, Dockerfile, the Snakefile, etc.). In the figure below, we represent the interactions between users and files. After a user modifies the configuration files mentioned above, the Snakefile file will search for and execute the Snakemake rules that contain the Python or R scripts necessary to generate or update the required output files (behavioral features, plots, etc.). 
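For example, once the pipeline has run, a participant's behavioral features can be loaded straight from data/processed/ . This is only an illustration: the participant folder and feature file name below are hypothetical, and the exact paths depend on the participants and sensors you configured.

```python
# Illustrative only: adjust the participant folder and feature file name
# to whatever RAPIDS created under data/processed/ for your study.
import pandas as pd

features = pd.read_csv("data/processed/p01/phone_calls_features.csv")
print(features.head())
```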
Interaction diagram between the user, and important files in RAPIDS","title":"File Structure"},{"location":"migrating-from-old-versions/","text":"Migrating from RAPIDS beta \u00b6 If you were relying on the old docs and the most recent version of RAPIDS you are working with is from or before Oct 13, 2020 you are using the beta version of RAPIDS. You can start using the new RAPIDS (we are starting with v0.1.0 ) right away, just take into account the following: Install a new copy of RAPIDS (the R and Python virtual environments didn\u2019t change so the cached versions will be reused) Make sure you don\u2019t skip a new Installation step to give execution permissions to the RAPIDS script: chmod +x rapids Follow the new Configuration guide. You can copy and paste your old .env file You can migrate your old participant files: python tools/update_format_participant_files.py Get familiar with the new way of Executing RAPIDS You can proceed to reconfigure your config.yaml , its structure is more consistent and should be familiar to you. Info If you have any questions reach out to us on Slack .","title":"Migrating from beta"},{"location":"migrating-from-old-versions/#migrating-from-rapids-beta","text":"If you were relying on the old docs and the most recent version of RAPIDS you are working with is from or before Oct 13, 2020 you are using the beta version of RAPIDS. You can start using the new RAPIDS (we are starting with v0.1.0 ) right away, just take into account the following: Install a new copy of RAPIDS (the R and Python virtual environments didn\u2019t change so the cached versions will be reused) Make sure you don\u2019t skip a new Installation step to give execution permissions to the RAPIDS script: chmod +x rapids Follow the new Configuration guide. You can copy and paste your old .env file You can migrate your old participant files: python tools/update_format_participant_files.py Get familiar with the new way of Executing RAPIDS You can proceed to reconfigure your config.yaml , its structure is more consistent and should be familiar to you. Info If you have any questions reach out to us on Slack .","title":"Migrating from RAPIDS beta"},{"location":"team/","text":"RAPIDS Team \u00b6 If you are interested in contributing feel free to submit a pull request or contact us. Core Team \u00b6 Julio Vega (Designer and Lead Developer) \u00b6 About Julio Vega is a postdoctoral associate at the Mobile Sensing + Health Institute. He is interested in personalized methodologies to monitor chronic conditions that affect daily human behavior using mobile and wearable data. vegaju at upmc . edu Personal Website Meng Li \u00b6 About Meng Li received her Master of Science degree in Information Science from the University of Pittsburgh. She is interested in applying machine learning algorithms to the medical field. lim11 at upmc . edu Linkedin Profile Github Profile Abhineeth Reddy Kunta \u00b6 About Abhineeth Reddy Kunta is a Senior Software Engineer with the Mobile Sensing + Health Institute. He is experienced in software development and specializes in building solutions using machine learning. Abhineeth likes exploring ways to leverage technology in advancing medicine and education. Previously he worked as a Computer Programmer at Georgia Department of Public Health. He has a master\u2019s degree in Computer Science from George Mason University. 
Kwesi Aguillera \u00b6 About Kwesi Aguillera is currently in his first year at the University of Pittsburgh pursuing a Master of Science in Information Science specializing in Big Data Analytics. He received his Bachelor of Science degree in Computer Science and Management from the University of the West Indies. Kwesi considers himself a full stack developer and looks forward to applying this knowledge to big data analysis. Linkedin Profile Echhit Joshi \u00b6 About Echhit Joshi is a Master's student at the School of Computing and Information at the University of Pittsburgh. His areas of interest are Machine/Deep Learning, Data Mining, and Analytics. Linkedin Profile Nicolas Leo \u00b6 About Nicolas is a rising senior studying computer science at the University of Pittsburgh. His academic interests include databases, machine learning, and application development. After completing his undergraduate degree, he plans to attend graduate school for an MS in Computer Science with a focus on Intelligent Systems. Nikunj Goel \u00b6 About Nik is a graduate student at the University of Pittsburgh pursuing a Master of Science in Information Science. He earned his Bachelor of Technology degree in Information Technology from India. He is a data enthusiast, passionate about finding meaning in raw data. In the long term, his goal is to create a breakthrough in Data Science and Deep Learning. Linkedin Profile Community Contributors \u00b6 Agam Kumar \u00b6 About Agam is a junior at Carnegie Mellon University studying Statistics and Machine Learning and pursuing an additional major in Computer Science. He is a member of the Data Science team in the Health and Human Performance Lab at CMU and has keen interests in software development and data science. His research interests include ML applications in medicine. Linkedin Profile Github Profile Yasaman S. Sefidgar \u00b6 About Linkedin Profile Advisors \u00b6 Afsaneh Doryab \u00b6 About Personal Website Carissa Low \u00b6 About Profile","title":"Team"},{"location":"team/#rapids-team","text":"If you are interested in contributing, feel free to submit a pull request or contact us.","title":"RAPIDS Team"},{"location":"team/#core-team","text":"","title":"Core Team"},{"location":"team/#julio-vega-designer-and-lead-developer","text":"About Julio Vega is a postdoctoral associate at the Mobile Sensing + Health Institute. He is interested in personalized methodologies to monitor chronic conditions that affect daily human behavior using mobile and wearable data. vegaju at upmc . edu Personal Website","title":"Julio Vega (Designer and Lead Developer)"},{"location":"team/#meng-li","text":"About Meng Li received her Master of Science degree in Information Science from the University of Pittsburgh. She is interested in applying machine learning algorithms to the medical field. lim11 at upmc . edu Linkedin Profile Github Profile","title":"Meng Li"},{"location":"team/#abhineeth-reddy-kunta","text":"About Abhineeth Reddy Kunta is a Senior Software Engineer with the Mobile Sensing + Health Institute. He is experienced in software development and specializes in building solutions using machine learning. Abhineeth likes exploring ways to leverage technology in advancing medicine and education. Previously he worked as a Computer Programmer at Georgia Department of Public Health.
He has a master\u2019s degree in Computer Science from George Mason University.","title":"Abhineeth Reddy Kunta"},{"location":"team/#kwesi-aguillera","text":"About Kwesi Aguillera is currently in his first year at the University of Pittsburgh pursuing a Master of Sciences in Information Science specializing in Big Data Analytics. He received his Bachelor of Science degree in Computer Science and Management from the University of the West Indies. Kwesi considers himself a full stack developer and looks forward to applying this knowledge to big data analysis. Linkedin Profile","title":"Kwesi Aguillera"},{"location":"team/#echhit-joshi","text":"About Echhit Joshi is a Masters student at the School of Computing and Information at University of Pittsburgh. His areas of interest are Machine/Deep Learning, Data Mining, and Analytics. Linkedin Profile","title":"Echhit Joshi"},{"location":"team/#nicolas-leo","text":"About Nicolas is a rising senior studying computer science at the University of Pittsburgh. His academic interests include databases, machine learning, and application development. After completing his undergraduate degree, he plans to attend graduate school for a MS in Computer Science with a focus on Intelligent Systems.","title":"Nicolas Leo"},{"location":"team/#nikunj-goel","text":"About Nik is a graduate student at the University of Pittsburgh pursuing Master of Science in Information Science. He earned his Bachelor of Technology degree in Information Technology from India. He is a Data Enthusiasts and passionate about finding the meaning out of raw data. In a long term, his goal is to create a breakthrough in Data Science and Deep Learning. Linkedin Profile","title":"Nikunj Goel"},{"location":"team/#community-contributors","text":"","title":"Community Contributors"},{"location":"team/#agam-kumar","text":"About Agam is a junior at Carnegie Mellon University studying Statistics and Machine Learning and pursuing an additional major in Computer Science. He is a member of the Data Science team in the Health and Human Performance Lab at CMU and has keen interests in software development and data science. His research interests include ML applications in medicine. Linkedin Profile Github Profile","title":"Agam Kumar"},{"location":"team/#yasaman-s-sefidgar","text":"About Linkedin Profile","title":"Yasaman S. Sefidgar"},{"location":"team/#advisors","text":"","title":"Advisors"},{"location":"team/#afsaneh-doryab","text":"About Personal Website","title":"Afsaneh Doryab"},{"location":"team/#carissa-low","text":"About Profile","title":"Carissa Low"},{"location":"developers/documentation/","text":"Documentation \u00b6 We use mkdocs with the material theme to write these docs. Whenever you make any changes, just push them back to the repo and the documentation will be deployed automatically. Set up development environment \u00b6 Make sure your conda environment is active pip install mkdocs pip install mkdocs-material Preview \u00b6 Run the following command in RAPIDS root folder and go to http://127.0.0.1:8000 : mkdocs serve File Structure \u00b6 The documentation config file is /mkdocs.yml , if you are adding new .md files to the docs modify the nav attribute at the bottom of that file. You can use the hierarchy there to find all the files that appear in the documentation. Reference \u00b6 Check this page to get familiar with the different visual elements we can use in the docs (admonitions, code blocks, tables, etc.) 
You can also refer to /docs/setup/installation.md and /docs/setup/configuration.md to see practical examples of these elements. Hint Any links to internal pages should be relative to the current page. For example, any link from this page (documentation) which is inside ./developers should begin with ../ to go one folder level up like: [ mylink ]( ../setup/installation.md ) Extras \u00b6 You can insert emojis using this syntax :[SOURCE]-[ICON_NAME] from the following sources: https://materialdesignicons.com/ https://fontawesome.com/icons/tasks?style=solid https://primer.style/octicons/ You can use this page to create markdown tables more easily","title":"Documentation"},{"location":"developers/documentation/#documentation","text":"We use mkdocs with the material theme to write these docs. Whenever you make any changes, just push them back to the repo and the documentation will be deployed automatically.","title":"Documentation"},{"location":"developers/documentation/#set-up-development-environment","text":"Make sure your conda environment is active pip install mkdocs pip install mkdocs-material","title":"Set up development environment"},{"location":"developers/documentation/#preview","text":"Run the following command in RAPIDS root folder and go to http://127.0.0.1:8000 : mkdocs serve","title":"Preview"},{"location":"developers/documentation/#file-structure","text":"The documentation config file is /mkdocs.yml , if you are adding new .md files to the docs modify the nav attribute at the bottom of that file. You can use the hierarchy there to find all the files that appear in the documentation.","title":"File Structure"},{"location":"developers/documentation/#reference","text":"Check this page to get familiar with the different visual elements we can use in the docs (admonitions, code blocks, tables, etc.) You can also refer to /docs/setup/installation.md and /docs/setup/configuration.md to see practical examples of these elements. Hint Any links to internal pages should be relative to the current page. For example, any link from this page (documentation) which is inside ./developers should begin with ../ to go one folder level up like: [ mylink ]( ../setup/installation.md )","title":"Reference"},{"location":"developers/documentation/#extras","text":"You can insert emojis using this syntax :[SOURCE]-[ICON_NAME] from the following sources: https://materialdesignicons.com/ https://fontawesome.com/icons/tasks?style=solid https://primer.style/octicons/ You can use this page to create markdown tables more easily","title":"Extras"},{"location":"developers/git-flow/","text":"Git Flow \u00b6 We use the develop/master variation of the OneFlow git flow Add New Features \u00b6 We use feature (topic) branches to implement new features Pull the latest develop git checkout develop git pull Create your feature branch git checkout -b feature/feature1 Add, modify or delete the necessary files to add your new feature Update the change log ( docs/change-log.md ) Stage and commit your changes using VS Code git GUI or the following commands git add modified-file1 modified-file2 git commit -m \"Add my new feature\" # use a concise description Integrate your new feature into develop Internal Developer You are an internal developer if you have writing permissions to the repository. Most feature branches are never pushed to the repo, only do so if you expect that its development will take days (to avoid losing your work if your computer is damaged).
Otherwise, follow these instructions to locally rebase your feature branch into develop and push those rebased changes online. git checkout feature/feature1 git pull origin develop git rebase -i develop git checkout develop git merge --no-ff feature/feature1 # (use the default merge message) git push origin develop git branch -d feature/feature1 External Developer You are an external developer if you do NOT have writing permissions to the repository. Push your feature branch online git push --set-upstream origin feature/external-test Then open a pull request to the develop branch using Github\u2019s GUI Release a New Version \u00b6 Pull the latest develop git checkout develop git pull Create a new release branch git describe --abbrev = 0 --tags # Bump the release (0.1.0 to 0.2.0 => NEW_RELEASE) git checkout -b release/v [ NEW_RELEASE ] develop Add new tag git tag v [ NEW_RELEASE ] Merge and push the release branch git checkout develop git merge release/v [ NEW_RELEASE ] git push --tags origin develop git branch -d release/v [ NEW_RELEASE ] Fast-forward master git checkout master git merge --ff-only develop git push Go to GitHub and create a new release based on the newest tag v[NEW_RELEASE] (remember to add the change log) Release a Hotfix \u00b6 Pull the latest master git checkout master git pull Start a hotfix branch git describe --abbrev = 0 --tags # Bump the hotfix (0.1.0 to 0.1.1 => NEW_HOTFIX) git checkout -b hotfix/v [ NEW_HOTFIX ] master Fix whatever needs to be fixed Update the change log Tag and merge the hotfix git tag v [ NEW_HOTFIX ] git checkout develop git merge hotfix/v [ NEW_HOTFIX ] git push --tags origin develop git branch -d hotfix/v [ NEW_HOTFIX ] Fast-forward master git checkout master git merge --ff-only v[NEW_HOTFIX] git push Go to GitHub and create a new release based on the newest tag v[NEW_HOTFIX] (remember to add the change log)","title":"Git Flow"},{"location":"developers/git-flow/#git-flow","text":"We use the develop/master variation of the OneFlow git flow","title":"Git Flow"},{"location":"developers/git-flow/#add-new-features","text":"We use feature (topic) branches to implement new features Pull the latest develop git checkout develop git pull Create your feature branch git checkout -b feature/feature1 Add, modify or delete the necessary files to add your new feature Update the change log ( docs/change-log.md ) Stage and commit your changes using VS Code git GUI or the following commands git add modified-file1 modified-file2 git commit -m \"Add my new feature\" # use a concise description Integrate your new feature into develop Internal Developer You are an internal developer if you have writing permissions to the repository. Most feature branches are never pushed to the repo, only do so if you expect that its development will take days (to avoid losing your work if your computer is damaged). Otherwise, follow these instructions to locally rebase your feature branch into develop and push those rebased changes online. git checkout feature/feature1 git pull origin develop git rebase -i develop git checkout develop git merge --no-ff feature/feature1 # (use the default merge message) git push origin develop git branch -d feature/feature1 External Developer You are an external developer if you do NOT have writing permissions to the repository.
Push your feature branch online git push --set-upstream origin feature/external-test Then open a pull request to the develop branch using Github\u2019s GUI","title":"Add New Features"},{"location":"developers/git-flow/#release-a-new-version","text":"Pull the latest develop git checkout develop git pull Create a new release branch git describe --abbrev = 0 --tags # Bump the release (0.1.0 to 0.2.0 => NEW_RELEASE) git checkout -b release/v [ NEW_RELEASE ] develop Add new tag git tag v [ NEW_RELEASE ] Merge and push the release branch git checkout develop git merge release/v [ NEW_RELEASE ] git push --tags origin develop git branch -d release/v [ NEW_RELEASE ] Fast-forward master git checkout master git merge --ff-only develop git push Go to GitHub and create a new release based on the newest tag v[NEW_RELEASE] (remember to add the change log)","title":"Release a New Version"},{"location":"developers/git-flow/#release-a-hotfix","text":"Pull the latest master git checkout master git pull Start a hotfix branch git describe --abbrev = 0 --tags # Bump the hotfix (0.1.0 to 0.1.1 => NEW_HOTFIX) git checkout -b hotfix/v [ NEW_HOTFIX ] master Fix whatever needs to be fixed Update the change log Tag and merge the hotfix git tag v [ NEW_HOTFIX ] git checkout develop git merge hotfix/v [ NEW_HOTFIX ] git push --tags origin develop git branch -d hotfix/v [ NEW_HOTFIX ] Fast-forward master git checkout master git merge --ff-only v[NEW_HOTFIX] git push Go to GitHub and create a new release based on the newest tag v[NEW_HOTFIX] (remember to add the change log)","title":"Release a Hotfix"},{"location":"developers/remote-support/","text":"Remote Support \u00b6 We use the Live Share extension of Visual Studio Code to debug issues when sharing data or database credentials is not possible. Install Visual Studio Code Open your RAPIDS root folder in a new VSCode window Open a new Terminal Terminal > New terminal Install the Live Share extension pack Press Ctrl + P or Cmd + P and run this command: >live share: start collaboration session Follow the instructions and share the session link you receive","title":"Remote Support"},{"location":"developers/remote-support/#remote-support","text":"We use the Live Share extension of Visual Studio Code to debug issues when sharing data or database credentials is not possible. Install Visual Studio Code Open your RAPIDS root folder in a new VSCode window Open a new Terminal Terminal > New terminal Install the Live Share extension pack Press Ctrl + P or Cmd + P and run this command: >live share: start collaboration session Follow the instructions and share the session link you receive","title":"Remote Support"},{"location":"developers/test-cases/","text":"Test Cases \u00b6 Along with the continued development and the addition of new sensors and features to the RAPIDS pipeline, tests for the currently available sensors and features are being implemented. Since this is a work in progress, this page will be updated with the list of sensors and features for which testing is available. For each of the sensors listed, a description of the data used for testing (the test cases) is outlined. Currently, for all intents and testing purposes, tests/data/raw/test01/ contains all the test data files for testing android data formats and tests/data/raw/test02/ contains all the test data files for testing iOS data formats. It follows that the expected (verified) output is contained in tests/data/processed/test01/ and tests/data/processed/test02/ for Android and iOS respectively.
tests/data/raw/test03/ and tests/data/raw/test04/ contain data files for testing empty raw data files for android and iOS respectively. The following is a list of the sensors that testing is currently available. Messages (SMS) \u00b6 The raw message data file contains data for 2 separate days. The data for the first day contains records 5 records for every epoch . The second day's data contains 6 records for each of only 2 epoch (currently morning and evening ) The raw message data contains records for both message_types (i.e. recieved and sent ) in both days in all epochs. The number records with each message_types per epoch is randomly distributed There is at least one records with each message_types per epoch. There is one raw message data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files Calls \u00b6 Due to the difference in the format of the raw call data for iOS and Android the following is the expected results the calls_with_datetime_unified.csv . This would give a better idea of the use cases being tested since the calls_with_datetime_unified.csv would make both the iOS and Android data comparable. The call data would contain data for 2 days. The data for the first day contains 6 records for every epoch . The second day's data contains 6 records for each of only 2 epoch (currently morning and evening ) The call data contains records for all call_types (i.e. incoming , outgoing and missed ) in both days in all epochs. The number records with each of the call_types per epoch is randomly distributed. There is at least one records with each call_types per epoch. There is one call data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files Screen \u00b6 Due to the difference in the format of the raw screen data for iOS and Android the following is the expected results the screen_deltas.csv . This would give a better idea of the use cases being tested since the screen_eltas.csv would make both the iOS and Android data comparable These files are used to calculate the features for the screen sensor The screen delta data file contains data for 1 day. The screen delta data contains 1 record to represent an unlock episode that falls within an epoch for every epoch . The screen delta data contains 1 record to represent an unlock episode that falls across the boundary of 2 epochs. Namely the unlock episode starts in one epoch and ends in the next, thus there is a record for unlock episodes that fall across night to morning , morning to afternoon and finally afternoon to night The testing is done for unlock episode_type. There is one screen data file each for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files Battery \u00b6 Due to the difference in the format of the raw battery data for iOS and Android as well as versions of iOS the following is the expected results the battery_deltas.csv . This would give a better idea of the use cases being tested since the battery_deltas.csv would make both the iOS and Android data comparable. These files are used to calculate the features for the battery sensor. The battery delta data file contains data for 1 day. The battery delta data contains 1 record each for a charging and discharging episode that falls within an epoch for every epoch . 
Thus, for the daily epoch there would be multiple charging and discharging episodes Since either a charging episode or a discharging episode and not both can occur across epochs, in order to test episodes that occur across epochs alternating episodes of charging and discharging episodes that fall across night to morning , morning to afternoon and finally afternoon to night are present in the battery delta data. This starts with a discharging episode that begins in night and end in morning . There is one battery data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files Bluetooth \u00b6 The raw Bluetooth data file contains data for 1 day. The raw Bluetooth data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) An option of 5 Bluetooth devices are randomly distributed throughout the data records. There is one raw Bluetooth data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files. WIFI \u00b6 There are 2 data files ( wifi_raw.csv and sensor_wifi_raw.csv ) for each fake participant for each phone platform. The raw WIFI data files contain data for 1 day. The sensor_wifi_raw.csv data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) The wifi_raw.csv data contains 3 records with random timestamps for each epoch to represent visible broadcasting WIFI network. This file is empty for the iOS phone testing data. An option of 10 access point devices is randomly distributed throughout the data records. 5 each for sensor_wifi_raw.csv and wifi_raw.csv . There data files for testing both iOS and Android data formats. There are also additional empty data files for both android and iOS for testing empty data files. Light \u00b6 The raw light data file contains data for 1 day. The raw light data contains 3 or 4 rows of data for each epoch except night . The single row of data for night is for testing features for single values inputs. (Example testing the standard deviation of one input value) Since light is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files. Application Foreground \u00b6 The raw application foreground data file contains data for 1 day. The raw application foreground data contains 7 - 9 rows of data for each epoch . The records for each epoch contains apps that are randomly selected from a list of apps that are from the MULTIPLE_CATEGORIES and SINGLE_CATEGORIES (See testing_config.yaml ). There are also records in each epoch that have apps randomly selected from a list of apps that are from the EXCLUDED_CATEGORIES and EXCLUDED_APPS . This is to test that these apps are actually being excluded from the calculations of features. There are also records to test SINGLE_APPS calculations. 
Since application foreground is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files. Activity Recognition \u00b6 The raw Activity Recognition data file contains data for 1 day. The raw Activity Recognition data each epoch period contains rows that records 2 - 5 different activity_types . The is such that durations of activities can be tested. Additionally, there are records that mimic the duration of an activity over the time boundary of neighboring epochs. (For example, there a set of records that mimic the participant in_vehicle from afternoon into evening ) There is one file each with raw Activity Recognition data for testing both iOS and Android data formats. (plugin_google_activity_recognition_raw.csv for android and plugin_ios_activity_recognition_raw.csv for iOS) There is also an additional empty data file for both android and iOS for testing empty data files. Conversation \u00b6 The raw conversation data file contains data for 2 day. The raw conversation data contains records with a sample of both datatypes (i.e. voice/noise = 0 , and conversation = 2 ) as well as rows with for samples of each of the inference values (i.e. silence = 0 , noise = 1 , voice = 2 , and unknown = 3 ) for each epoch . The different datatype and inference records are randomly distributed throughout the epoch . Additionally there are 2 - 5 records for conversations ( datatype = 2, and inference = -1) in each epoch and for each epoch except night, there is a conversation record that has a double_convo_start timestamp that is from the previous epoch . This is to test the calculations of features across epochs . There is a raw conversation data file for both android and iOS platforms ( plugin_studentlife_audio_android_raw.csv and plugin_studentlife_audio_raw.csv respectively). Finally, there are also additional empty data files for both android and iOS for testing empty data files","title":"Test cases"},{"location":"developers/test-cases/#test-cases","text":"Along with the continued development and the addition of new sensors and features to the RAPIDS pipeline, tests for the currently available sensors and features are being implemented. Since this is a Work In Progress this page will be updated with the list of sensors and features for which testing is available. For each of the sensors listed a description of the data used for testing (test cases) are outline. Currently for all intent and testing purposes the tests/data/raw/test01/ contains all the test data files for testing android data formats and tests/data/raw/test02/ contains all the test data files for testing iOS data formats. It follows that the expected (verified output) are contained in the tests/data/processed/test01/ and tests/data/processed/test02/ for Android and iOS respectively. tests/data/raw/test03/ and tests/data/raw/test04/ contain data files for testing empty raw data files for android and iOS respectively. The following is a list of the sensors that testing is currently available.","title":"Test Cases"},{"location":"developers/test-cases/#messages-sms","text":"The raw message data file contains data for 2 separate days. The data for the first day contains records 5 records for every epoch . The second day's data contains 6 records for each of only 2 epoch (currently morning and evening ) The raw message data contains records for both message_types (i.e. recieved and sent ) in both days in all epochs. 
The number records with each message_types per epoch is randomly distributed There is at least one records with each message_types per epoch. There is one raw message data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Messages (SMS)"},{"location":"developers/test-cases/#calls","text":"Due to the difference in the format of the raw call data for iOS and Android the following is the expected results the calls_with_datetime_unified.csv . This would give a better idea of the use cases being tested since the calls_with_datetime_unified.csv would make both the iOS and Android data comparable. The call data would contain data for 2 days. The data for the first day contains 6 records for every epoch . The second day's data contains 6 records for each of only 2 epoch (currently morning and evening ) The call data contains records for all call_types (i.e. incoming , outgoing and missed ) in both days in all epochs. The number records with each of the call_types per epoch is randomly distributed. There is at least one records with each call_types per epoch. There is one call data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Calls"},{"location":"developers/test-cases/#screen","text":"Due to the difference in the format of the raw screen data for iOS and Android the following is the expected results the screen_deltas.csv . This would give a better idea of the use cases being tested since the screen_eltas.csv would make both the iOS and Android data comparable These files are used to calculate the features for the screen sensor The screen delta data file contains data for 1 day. The screen delta data contains 1 record to represent an unlock episode that falls within an epoch for every epoch . The screen delta data contains 1 record to represent an unlock episode that falls across the boundary of 2 epochs. Namely the unlock episode starts in one epoch and ends in the next, thus there is a record for unlock episodes that fall across night to morning , morning to afternoon and finally afternoon to night The testing is done for unlock episode_type. There is one screen data file each for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Screen"},{"location":"developers/test-cases/#battery","text":"Due to the difference in the format of the raw battery data for iOS and Android as well as versions of iOS the following is the expected results the battery_deltas.csv . This would give a better idea of the use cases being tested since the battery_deltas.csv would make both the iOS and Android data comparable. These files are used to calculate the features for the battery sensor. The battery delta data file contains data for 1 day. The battery delta data contains 1 record each for a charging and discharging episode that falls within an epoch for every epoch . 
Thus, for the daily epoch there would be multiple charging and discharging episodes Since either a charging episode or a discharging episode and not both can occur across epochs, in order to test episodes that occur across epochs alternating episodes of charging and discharging episodes that fall across night to morning , morning to afternoon and finally afternoon to night are present in the battery delta data. This starts with a discharging episode that begins in night and end in morning . There is one battery data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Battery"},{"location":"developers/test-cases/#bluetooth","text":"The raw Bluetooth data file contains data for 1 day. The raw Bluetooth data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) An option of 5 Bluetooth devices are randomly distributed throughout the data records. There is one raw Bluetooth data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files.","title":"Bluetooth"},{"location":"developers/test-cases/#wifi","text":"There are 2 data files ( wifi_raw.csv and sensor_wifi_raw.csv ) for each fake participant for each phone platform. The raw WIFI data files contain data for 1 day. The sensor_wifi_raw.csv data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) The wifi_raw.csv data contains 3 records with random timestamps for each epoch to represent visible broadcasting WIFI network. This file is empty for the iOS phone testing data. An option of 10 access point devices is randomly distributed throughout the data records. 5 each for sensor_wifi_raw.csv and wifi_raw.csv . There data files for testing both iOS and Android data formats. There are also additional empty data files for both android and iOS for testing empty data files.","title":"WIFI"},{"location":"developers/test-cases/#light","text":"The raw light data file contains data for 1 day. The raw light data contains 3 or 4 rows of data for each epoch except night . The single row of data for night is for testing features for single values inputs. (Example testing the standard deviation of one input value) Since light is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files.","title":"Light"},{"location":"developers/test-cases/#application-foreground","text":"The raw application foreground data file contains data for 1 day. The raw application foreground data contains 7 - 9 rows of data for each epoch . The records for each epoch contains apps that are randomly selected from a list of apps that are from the MULTIPLE_CATEGORIES and SINGLE_CATEGORIES (See testing_config.yaml ). 
There are also records in each epoch that have apps randomly selected from a list of apps that are from the EXCLUDED_CATEGORIES and EXCLUDED_APPS . This is to test that these apps are actually being excluded from the calculations of features. There are also records to test SINGLE_APPS calculations. Since application foreground is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files.","title":"Application Foreground"},{"location":"developers/test-cases/#activity-recognition","text":"The raw Activity Recognition data file contains data for 1 day. The raw Activity Recognition data each epoch period contains rows that records 2 - 5 different activity_types . The is such that durations of activities can be tested. Additionally, there are records that mimic the duration of an activity over the time boundary of neighboring epochs. (For example, there a set of records that mimic the participant in_vehicle from afternoon into evening ) There is one file each with raw Activity Recognition data for testing both iOS and Android data formats. (plugin_google_activity_recognition_raw.csv for android and plugin_ios_activity_recognition_raw.csv for iOS) There is also an additional empty data file for both android and iOS for testing empty data files.","title":"Activity Recognition"},{"location":"developers/test-cases/#conversation","text":"The raw conversation data file contains data for 2 day. The raw conversation data contains records with a sample of both datatypes (i.e. voice/noise = 0 , and conversation = 2 ) as well as rows with for samples of each of the inference values (i.e. silence = 0 , noise = 1 , voice = 2 , and unknown = 3 ) for each epoch . The different datatype and inference records are randomly distributed throughout the epoch . Additionally there are 2 - 5 records for conversations ( datatype = 2, and inference = -1) in each epoch and for each epoch except night, there is a conversation record that has a double_convo_start timestamp that is from the previous epoch . This is to test the calculations of features across epochs . There is a raw conversation data file for both android and iOS platforms ( plugin_studentlife_audio_android_raw.csv and plugin_studentlife_audio_raw.csv respectively). Finally, there are also additional empty data files for both android and iOS for testing empty data files","title":"Conversation"},{"location":"developers/testing/","text":"Testing \u00b6 The following is a simple guide to testing RAPIDS. All files necessary for testing are stored in the /tests directory Steps for Testing \u00b6 To begin testing RAPIDS place the fake raw input data csv files in tests/data/raw/ . The fake participant files should be placed in tests/data/external/ . The expected output files of RAPIDS after processing the input data should be placed in tests/data/processesd/ . The Snakemake rule(s) that are to be tested must be placed in the tests/Snakemake file. The current tests/Snakemake is a good example of how to define them. (At the time of writing this documentation the snakefile contains rules messages (SMS), calls and screen) Edit the tests/settings/config.yaml . Add and/or remove the rules to be run for testing from the forcerun list. Edit the tests/settings/testing_config.yaml with the necessary configuration settings for running the rules to be tested. Add any additional testscripts in tests/scripts . Uncomment or comment off lines in the testing shell script tests/scripts/run_tests.sh . 
Run the testing shell script. tests/scripts/run_tests.sh The following is a snippet of the output you should see after running your test. test_sensors_files_exist ( test_sensor_features.TestSensorFeatures ) ... ok test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ... FAIL ====================================================================== FAIL: test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ---------------------------------------------------------------------- The results above show that the first test test_sensors_files_exist passed while test_sensors_features_calculations failed. In addition you should get the traceback of the failure (not shown here). For more information on how to implement test scripts and use unittest please see Unittest Documentation Testing of the RAPIDS sensors and features is a work-in-progress. Please see test-cases for a list of sensors and features that have testing currently available. Currently the repository is set up to test a number of sensors out of the box by simply running the tests/scripts/run_tests.sh command once the RAPIDS python environment is active.","title":"Testing"},{"location":"developers/testing/#testing","text":"The following is a simple guide to testing RAPIDS. All files necessary for testing are stored in the /tests directory","title":"Testing"},{"location":"developers/testing/#steps-for-testing","text":"To begin testing RAPIDS place the fake raw input data csv files in tests/data/raw/ . The fake participant files should be placed in tests/data/external/ . The expected output files of RAPIDS after processing the input data should be placed in tests/data/processesd/ . The Snakemake rule(s) that are to be tested must be placed in the tests/Snakemake file. The current tests/Snakemake is a good example of how to define them. (At the time of writing this documentation the snakefile contains rules messages (SMS), calls and screen) Edit the tests/settings/config.yaml . Add and/or remove the rules to be run for testing from the forcerun list. Edit the tests/settings/testing_config.yaml with the necessary configuration settings for running the rules to be tested. Add any additional testscripts in tests/scripts . Uncomment or comment off lines in the testing shell script tests/scripts/run_tests.sh . Run the testing shell script. tests/scripts/run_tests.sh The following is a snippet of the output you should see after running your test. test_sensors_files_exist ( test_sensor_features.TestSensorFeatures ) ... ok test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ... FAIL ====================================================================== FAIL: test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ---------------------------------------------------------------------- The results above show that the first test test_sensors_files_exist passed while test_sensors_features_calculations failed. In addition you should get the traceback of the failure (not shown here). For more information on how to implement test scripts and use unittest please see Unittest Documentation Testing of the RAPIDS sensors and features is a work-in-progress. Please see test-cases for a list of sensors and features that have testing currently available. 
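If you are adding a test script of your own, the minimal unittest sketch below illustrates the compare-computed-against-expected pattern described above; the CSV paths are hypothetical placeholders, so adjust them to the sensor and test participant you are working with:
import unittest
import pandas as pd

class TestSensorFeatures(unittest.TestCase):
    # Hypothetical paths: point them to the computed and expected feature files for your sensor
    computed_path = "data/processed/features/test01/phone_messages.csv"
    expected_path = "tests/data/processed/test01/phone_messages.csv"

    def test_sensors_features_calculations(self):
        computed = pd.read_csv(self.computed_path)
        expected = pd.read_csv(self.expected_path)
        # Column order is ignored; the values must match the verified output
        pd.testing.assert_frame_equal(computed, expected, check_like=True)

if __name__ == "__main__":
    unittest.main()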
Currently the repository is set up to test a number of sensors out of the box by simply running the tests/scripts/run_tests.sh command once the RAPIDS python environment is active.","title":"Steps for Testing"},{"location":"developers/virtual-environments/","text":"Python Virtual Environment \u00b6 Add new packages \u00b6 Try to install any new package using conda install -c CHANNEL PACKAGE_NAME (you can use pip if the package is only available there). Make sure your Python virtual environment is active ( conda activate YOUR_ENV ). Remove packages \u00b6 Uninstall packages using the same manager you used to install them conda remove PACKAGE_NAME or pip uninstall PACKAGE_NAME Update your conda environment.yaml \u00b6 After installing or removing a package you can use the following command in your terminal to update your environment.yaml before publishing your pipeline. Note that we ignore the package version for libfortran to keep compatibility with Linux: conda env export --no-builds | sed 's/^.*libgfortran.*$/ - libgfortran/' > environment.yml R Virtual Environment \u00b6 Add new packages \u00b6 Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::install(\"PACKAGE_NAME\") Remove packages \u00b6 Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::remove(\"PACKAGE_NAME\") Update your R renv.lock \u00b6 After installing or removing a package you can use the following command in your terminal to update your renv.lock before publishing your pipeline. Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::snapshot() (renv will ask you to confirm any updates to this file)","title":"Virtual Environments"},{"location":"developers/virtual-environments/#python-virtual-environment","text":"","title":"Python Virtual Environment"},{"location":"developers/virtual-environments/#add-new-packages","text":"Try to install any new package using conda install -c CHANNEL PACKAGE_NAME (you can use pip if the package is only available there). Make sure your Python virtual environment is active ( conda activate YOUR_ENV ).","title":"Add new packages"},{"location":"developers/virtual-environments/#remove-packages","text":"Uninstall packages using the same manager you used to install them conda remove PACKAGE_NAME or pip uninstall PACKAGE_NAME","title":"Remove packages"},{"location":"developers/virtual-environments/#update-your-conda-environmentyaml","text":"After installing or removing a package you can use the following command in your terminal to update your environment.yaml before publishing your pipeline. 
Note that we ignore the package version for libfortran to keep compatibility with Linux: conda env export --no-builds | sed 's/^.*libgfortran.*$/ - libgfortran/' > environment.yml","title":"Update your conda environment.yaml"},{"location":"developers/virtual-environments/#r-virtual-environment","text":"","title":"R Virtual Environment"},{"location":"developers/virtual-environments/#add-new-packages_1","text":"Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::install(\"PACKAGE_NAME\")","title":"Add new packages"},{"location":"developers/virtual-environments/#remove-packages_1","text":"Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::remove(\"PACKAGE_NAME\")","title":"Remove packages"},{"location":"developers/virtual-environments/#update-your-r-renvlock","text":"After installing or removing a package you can use the following command in your terminal to update your renv.lock before publishing your pipeline. Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::snapshot() (renv will ask you to confirm any updates to this file)","title":"Update your R renv.lock"},{"location":"features/add-new-features/","text":"Add New Features \u00b6 Hint We recommend reading the Behavioral Features Introduction before reading this page Hint You won\u2019t have to deal with time zones, dates, times, data cleaning or preprocessing. The data that RAPIDS pipes to your feature extraction code is ready to process. New Features for Existing Sensors \u00b6 You can add new features to any existing sensors (see list below) by adding a new provider in three steps: Modify the config.yaml file Create a provider folder, script and function Implement your features extraction code As a tutorial, we will add a new provider for PHONE_ACCELEROMETER called VEGA that extracts feature1 , feature2 , feature3 in Python and that it requires a parameter from the user called MY_PARAMETER . Existing Sensors An existing sensor is any of the phone or Fitbit sensors with a configuration entry in config.yaml : Phone Accelerometer Phone Activity Recognition Phone Applications Foreground Phone Battery Phone Bluetooth Phone Calls Phone Conversation Phone Data Yield Phone Light Phone Locations Phone Messages Phone Screen Phone WiFI Connected Phone WiFI Visible Fitbit Heart Rate Summary Fitbit Heart Rate Intraday Fitbit Sleep Summary Fitbit Steps Summary Fitbit Steps Intraday Modify the config.yaml file \u00b6 In this step you need to add your provider configuration section under the relevant sensor in config.yaml . See our example for our tutorial\u2019s VEGA provider for PHONE_ACCELEROMETER : Example configuration for a new accelerometer provider VEGA PHONE_ACCELEROMETER : TABLE : accelerometer PROVIDERS : RAPIDS : COMPUTE : False ... PANDA : COMPUTE : False ... VEGA : COMPUTE : False FEATURES : [ \"feature1\" , \"feature2\" , \"feature3\" ] MY_PARAMTER : a_string SRC_FOLDER : \"vega\" SRC_LANGUAGE : \"python\" Key Description [COMPUTE] Flag to activate/deactivate your provider [FEATURES] List of features your provider supports. Your provider code should only return the features on this list [MY_PARAMTER] An arbitrary parameter that our example provider VEGA needs. This can be a boolean, integer, float, string or an array of any of such types. 
[SRC_LANGUAGE] The programming language of your provider script; it can be python or r , in our example python [SRC_FOLDER] The name of your provider in lower case, in our example vega (this will be the name of your folder in the next step) Create a provider folder, script and function \u00b6 In this step you need to add a folder, script and function for your provider. Create your provider folder under src/feature/DEVICE_SENSOR/YOUR_PROVIDER , in our example src/feature/phone_accelerometer/vega (same as [SRC_FOLDER] in the step above). Create your provider script inside your provider folder; it can be a Python file called main.py or an R file called main.R . Add your provider function in your provider script. The name of this function should be [providername]_features , in our example vega_features Python function def [ providername ] _features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): R function [ providername ] _ features <- function ( sensor_data , time_segment , provider ) Implement your feature extraction code \u00b6 The provider function that you created in the step above will receive the following parameters: Parameter Description sensor_data_files Path to the CSV file containing the data of a single participant. This data has been cleaned and preprocessed. Your function will be automatically called for each participant in your study (in the [PIDS] array in config.yaml ) time_segment The label of the time segment that should be processed. provider The parameters you configured for your provider in config.yaml will be available in this variable as a dictionary in Python or a list in R. In our example this dictionary contains {MY_PARAMETER:\"a_string\"} filter_data_by_segment Python only. A function that you will use to filter your data. In R this function is already available in the environment. *args Python only. Not used for now **kwargs Python only. Not used for now The code to extract your behavioral features should be implemented in your provider function and in general terms it will have three stages: 1. Read a participant\u2019s data by loading the CSV data stored in the file pointed to by sensor_data_files acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) Note that phone\u2019s battery, screen, and activity recognition data is given as episodes instead of event rows (for example, start and end timestamps of the periods the phone screen was on) 2. Filter your data to process only those rows that belong to time_segment This step is only one line of code, but to understand why we need it, keep reading. acc_data = filter_data_by_segment ( acc_data , time_segment ) You should use the filter_data_by_segment() function to process and group those rows that belong to each of the time segments RAPIDS could be configured with . Let\u2019s understand the filter_data_by_segment() function with an example. A RAPIDS user can extract features on any arbitrary time segment . A time segment is a period of time that has a label and one or more instances. For example, the user (or you) could have requested features on a daily, weekly, and week-end basis for p01 . The labels are arbitrary and the instances depend on the days a participant was monitored for: the daily segment could be named my_days and if p01 was monitored for 14 days, it would have 14 instances the weekly segment could be named my_weeks and if p01 was monitored for 14 days, it would have 2 instances.
the weekend segment could be named my_weekends and if p01 was monitored for 14 days, it would have 2 instances. For this example, RAPIDS will call your provider function three times for p01 , once where time_segment is my_days , once where time_segment is my_weeks and once where time_segment is my_weekends . In this example not every row in p01 \u2018s data needs to take part in the feature computation for either segment and the rows need to be grouped differently. Thus filter_data_by_segment() comes in handy, it will return a data frame that contains the rows that were logged during a time segment plus an extra column called local_segment . This new column will have as many unique values as time segment instances exist (14, 2, and 2 for our p01 \u2018s my_days , my_weeks , and my_weekends examples). After filtering, you should group the data frame by this column and compute any desired features , for example: acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () The reason RAPIDS does not filter the participant\u2019s data set for you is because your code might need to compute something based on a participant\u2019s complete dataset before computing their features. For example, you might want to identify the number that called a participant the most throughout the study before computing a feature with the number of calls the participant received from this number. 3. Return a data frame with your features After filtering, grouping your data, and computing your features, your provider function should return a data frame that has: One row per time segment instance (e.g. 14 our p01 \u2018s my_days example) The local_segment column added by filter_data_by_segment() One column per feature. By convention the name of your features should only contain letters or numbers ( feature1 ). RAPIDS will automatically add the right sensor and provider prefix ( phone_accelerometr_vega_ ) PHONE_ACCELEROMETER Provider Example For your reference, this a short example of our own provider ( RAPIDS ) for PHONE_ACCELEROMETER that computes five acceleration features def rapids_features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) requested_features = provider [ \"FEATURES\" ] # name of the features this function can compute base_features_names = [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] # the subset of requested features this function can compute features_to_compute = list ( set ( requested_features ) & set ( base_features_names )) acc_features = pd . DataFrame ( columns = [ \"local_segment\" ] + features_to_compute ) if not acc_data . empty : acc_data = filter_data_by_segment ( acc_data , time_segment ) if not acc_data . empty : acc_features = pd . DataFrame () # get magnitude related features: magnitude = sqrt(x^2+y^2+z^2) magnitude = acc_data . apply ( lambda row : np . sqrt ( row [ \"double_values_0\" ] ** 2 + row [ \"double_values_1\" ] ** 2 + row [ \"double_values_2\" ] ** 2 ), axis = 1 ) acc_data = acc_data . assign ( magnitude = magnitude . values ) if \"maxmagnitude\" in features_to_compute : acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () if \"minmagnitude\" in features_to_compute : acc_features [ \"minmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . 
min () if \"avgmagnitude\" in features_to_compute : acc_features [ \"avgmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . mean () if \"medianmagnitude\" in features_to_compute : acc_features [ \"medianmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . median () if \"stdmagnitude\" in features_to_compute : acc_features [ \"stdmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . std () acc_features = acc_features . reset_index () return acc_features New Features for Non-Existing Sensors \u00b6 If you want to add features for a device or a sensor that we do not support at the moment (those that do not appear in the \"Existing Sensors\" list above), contact us or request it on Slack and we can add the necessary code so you can follow the instructions above.","title":"Add New Features"},{"location":"features/add-new-features/#add-new-features","text":"Hint We recommend reading the Behavioral Features Introduction before reading this page Hint You won\u2019t have to deal with time zones, dates, times, data cleaning or preprocessing. The data that RAPIDS pipes to your feature extraction code is ready to process.","title":"Add New Features"},{"location":"features/add-new-features/#new-features-for-existing-sensors","text":"You can add new features to any existing sensors (see list below) by adding a new provider in three steps: Modify the config.yaml file Create a provider folder, script and function Implement your features extraction code As a tutorial, we will add a new provider for PHONE_ACCELEROMETER called VEGA that extracts feature1 , feature2 , feature3 in Python and that it requires a parameter from the user called MY_PARAMETER . Existing Sensors An existing sensor is any of the phone or Fitbit sensors with a configuration entry in config.yaml : Phone Accelerometer Phone Activity Recognition Phone Applications Foreground Phone Battery Phone Bluetooth Phone Calls Phone Conversation Phone Data Yield Phone Light Phone Locations Phone Messages Phone Screen Phone WiFI Connected Phone WiFI Visible Fitbit Heart Rate Summary Fitbit Heart Rate Intraday Fitbit Sleep Summary Fitbit Steps Summary Fitbit Steps Intraday","title":"New Features for Existing Sensors"},{"location":"features/add-new-features/#modify-the-configyaml-file","text":"In this step you need to add your provider configuration section under the relevant sensor in config.yaml . See our example for our tutorial\u2019s VEGA provider for PHONE_ACCELEROMETER : Example configuration for a new accelerometer provider VEGA PHONE_ACCELEROMETER : TABLE : accelerometer PROVIDERS : RAPIDS : COMPUTE : False ... PANDA : COMPUTE : False ... VEGA : COMPUTE : False FEATURES : [ \"feature1\" , \"feature2\" , \"feature3\" ] MY_PARAMTER : a_string SRC_FOLDER : \"vega\" SRC_LANGUAGE : \"python\" Key Description [COMPUTE] Flag to activate/deactivate your provider [FEATURES] List of features your provider supports. Your provider code should only return the features on this list [MY_PARAMTER] An arbitrary parameter that our example provider VEGA needs. This can be a boolean, integer, float, string or an array of any of such types. 
[SRC_LANGUAGE] The programming language of your provider script, it can be python or r , in our example python [SRC_FOLDER] The name of your provider in lower case, in our example vega (this will be the name of your folder in the next step)","title":"Modify the config.yaml file"},{"location":"features/add-new-features/#create-a-provider-folder-script-and-function","text":"In this step you need to add a folder, script and function for your provider. Create your provider folder under src/feature/DEVICE_SENSOR/YOUR_PROVIDER , in our example src/feature/phone_accelerometer/vega (same as [SRC_FOLDER] in the step above). Create your provider script inside your provider folder, it can be a Python file called main.py or an R file called main.R . Add your provider function in your provider script. The name of such function should be [providername]_features , in our example vega_features Python function def [ providername ] _features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): R function [ providername ] _ features <- function ( sensor_data , time_segment , provider )","title":"Create a provider folder, script and function"},{"location":"features/add-new-features/#implement-your-feature-extraction-code","text":"The provider function that you created in the step above will receive the following parameters: Parameter Description sensor_data_files Path to the CSV file containing the data of a single participant. This data has been cleaned and preprocessed. Your function will be automatically called for each participant in your study (in the [PIDS] array in config.yaml ) time_segment The label of the time segment that should be processed. provider The parameters you configured for your provider in config.yaml will be available in this variable as a dictionary in Python or a list in R. In our example this dictionary contains {MY_PARAMETER:\"a_string\"} filter_data_by_segment Python only. A function that you will use to filter your data. In R this function is already available in the environment. *args Python only. Not used for now **kwargs Python only. Not used for now The code to extract your behavioral features should be implemented in your provider function and in general terms it will have three stages: 1. Read a participant\u2019s data by loading the CSV data stored in the file pointed by sensor_data_files acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) Note that phone\u2019s battery, screen, and activity recognition data is given as episodes instead of event rows (for example, start and end timestamps of the periods the phone screen was on) 2. Filter your data to process only those rows that belong to time_segment This step is only one line of code, but to undersand why we need it, keep reading. acc_data = filter_data_by_segment ( acc_data , time_segment ) You should use the filter_data_by_segment() function to process and group those rows that belong to each of the time segments RAPIDS could be configured with . Let\u2019s understand the filter_data_by_segment() function with an example. A RAPIDS user can extract features on any arbitrary time segment . A time segment is a period of time that has a label and one or more instances. For example, the user (or you) could have requested features on a daily, weekly, and week-end basis for p01 . 
The labels are arbritrary and the instances depend on the days a participant was monitored for: the daily segment could be named my_days and if p01 was monitored for 14 days, it would have 14 instances the weekly segment could be named my_weeks and if p01 was monitored for 14 days, it would have 2 instances. the weekend segment could be named my_weekends and if p01 was monitored for 14 days, it would have 2 instances. For this example, RAPIDS will call your provider function three times for p01 , once where time_segment is my_days , once where time_segment is my_weeks and once where time_segment is my_weekends . In this example not every row in p01 \u2018s data needs to take part in the feature computation for either segment and the rows need to be grouped differently. Thus filter_data_by_segment() comes in handy, it will return a data frame that contains the rows that were logged during a time segment plus an extra column called local_segment . This new column will have as many unique values as time segment instances exist (14, 2, and 2 for our p01 \u2018s my_days , my_weeks , and my_weekends examples). After filtering, you should group the data frame by this column and compute any desired features , for example: acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () The reason RAPIDS does not filter the participant\u2019s data set for you is because your code might need to compute something based on a participant\u2019s complete dataset before computing their features. For example, you might want to identify the number that called a participant the most throughout the study before computing a feature with the number of calls the participant received from this number. 3. Return a data frame with your features After filtering, grouping your data, and computing your features, your provider function should return a data frame that has: One row per time segment instance (e.g. 14 our p01 \u2018s my_days example) The local_segment column added by filter_data_by_segment() One column per feature. By convention the name of your features should only contain letters or numbers ( feature1 ). RAPIDS will automatically add the right sensor and provider prefix ( phone_accelerometr_vega_ ) PHONE_ACCELEROMETER Provider Example For your reference, this a short example of our own provider ( RAPIDS ) for PHONE_ACCELEROMETER that computes five acceleration features def rapids_features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) requested_features = provider [ \"FEATURES\" ] # name of the features this function can compute base_features_names = [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] # the subset of requested features this function can compute features_to_compute = list ( set ( requested_features ) & set ( base_features_names )) acc_features = pd . DataFrame ( columns = [ \"local_segment\" ] + features_to_compute ) if not acc_data . empty : acc_data = filter_data_by_segment ( acc_data , time_segment ) if not acc_data . empty : acc_features = pd . DataFrame () # get magnitude related features: magnitude = sqrt(x^2+y^2+z^2) magnitude = acc_data . apply ( lambda row : np . sqrt ( row [ \"double_values_0\" ] ** 2 + row [ \"double_values_1\" ] ** 2 + row [ \"double_values_2\" ] ** 2 ), axis = 1 ) acc_data = acc_data . assign ( magnitude = magnitude . 
values ) if \"maxmagnitude\" in features_to_compute : acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () if \"minmagnitude\" in features_to_compute : acc_features [ \"minmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . min () if \"avgmagnitude\" in features_to_compute : acc_features [ \"avgmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . mean () if \"medianmagnitude\" in features_to_compute : acc_features [ \"medianmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . median () if \"stdmagnitude\" in features_to_compute : acc_features [ \"stdmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . std () acc_features = acc_features . reset_index () return acc_features","title":"Implement your feature extraction code"},{"location":"features/add-new-features/#new-features-for-non-existing-sensors","text":"If you want to add features for a device or a sensor that we do not support at the moment (those that do not appear in the \"Existing Sensors\" list above), contact us or request it on Slack and we can add the necessary code so you can follow the instructions above.","title":"New Features for Non-Existing Sensors"},{"location":"features/feature-introduction/","text":"Behavioral Features Introduction \u00b6 Every phone or Fitbit sensor has a corresponding config section in config.yaml , these sections follow a similar structure and we\u2019ll use PHONE_ACCELEROMETER as an example to explain this structure. Hint We recommend reading this page if you are using RAPIDS for the first time All computed sensor features are stored under /data/processed/features on files per sensor, per participant and per study (all participants). Every time you change any sensor parameters, provider parameters or provider features, all the necessary files will be updated as soon as you execute RAPIDS. Config section example for PHONE_ACCELEROMETER # 1) Config section PHONE_ACCELEROMETER : # 2) Parameters for PHONE_ACCELEROMETER TABLE : accelerometer # 3) Providers for PHONE_ACCELEROMETER PROVIDERS : # 4) RAPIDS provider RAPIDS : # 4.1) Parameters of RAPIDS provider of PHONE_ACCELEROMETER COMPUTE : False # 4.2) Features of RAPIDS provider of PHONE_ACCELEROMETER FEATURES : [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] SRC_FOLDER : \"rapids\" # inside src/features/phone_accelerometer SRC_LANGUAGE : \"python\" # 5) PANDA provider PANDA : # 5.1) Parameters of PANDA provider of PHONE_ACCELEROMETER COMPUTE : False VALID_SENSED_MINUTES : False # 5.2) Features of PANDA provider of PHONE_ACCELEROMETER FEATURES : exertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] nonexertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] SRC_FOLDER : \"panda\" # inside src/features/phone_accelerometer SRC_LANGUAGE : \"python\" Sensor Parameters \u00b6 Each sensor configuration section has a \u201cparameters\u201d subsection (see #2 in the example). These are parameters that affect different aspects of how the raw data is downloaded, and processed. The TABLE parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page. 
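As a quick illustration of how this config structure can be inspected, the sketch below (an example only, assuming PyYAML is installed and the script is run from the RAPIDS root folder) loads config.yaml and prints which providers are switched on for one sensor:
import yaml  # assumes PyYAML is available in your RAPIDS Python environment

with open("config.yaml") as config_file:
    config = yaml.safe_load(config_file)

sensor = config["PHONE_ACCELEROMETER"]  # 1) config section
print("table:", sensor["TABLE"])        # 2) sensor parameters
for name, provider in sensor["PROVIDERS"].items():  # 3) providers
    # 4.1/5.1) provider parameters include at least the COMPUTE flag
    status = "enabled" if provider.get("COMPUTE") else "disabled"
    print(f"{name}: {status}, features: {provider.get('FEATURES')}")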
Sensor Providers \u00b6 Each sensor configuration section can have zero, one or more behavioral feature providers (see #3 in the example). A provider is a script created by the core RAPIDS team or other researchers that extracts behavioral features for that sensor. In this example, accelerometer has two providers: RAPIDS (see #4 ) and PANDA (see #5 ). Provider Parameters \u00b6 Each provider has parameters that affect the computation of the behavioral features it offers (see #4.1 or #5.1 in the example). These parameters will include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page. Provider Features \u00b6 Each provider offers a set of behavioral features (see #4.2 or #5.2 in the example). For some providers these features are grouped in an array (like those for RAPIDS provider in #4.2 ) but for others they are grouped in a collection of arrays depending on the meaning and purpose of those features (like those for PANDAS provider in #5.2 ). In either case, you can delete the features you are not interested in and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page.","title":"Introduction"},{"location":"features/feature-introduction/#behavioral-features-introduction","text":"Every phone or Fitbit sensor has a corresponding config section in config.yaml , these sections follow a similar structure and we\u2019ll use PHONE_ACCELEROMETER as an example to explain this structure. Hint We recommend reading this page if you are using RAPIDS for the first time All computed sensor features are stored under /data/processed/features on files per sensor, per participant and per study (all participants). Every time you change any sensor parameters, provider parameters or provider features, all the necessary files will be updated as soon as you execute RAPIDS. Config section example for PHONE_ACCELEROMETER # 1) Config section PHONE_ACCELEROMETER : # 2) Parameters for PHONE_ACCELEROMETER TABLE : accelerometer # 3) Providers for PHONE_ACCELEROMETER PROVIDERS : # 4) RAPIDS provider RAPIDS : # 4.1) Parameters of RAPIDS provider of PHONE_ACCELEROMETER COMPUTE : False # 4.2) Features of RAPIDS provider of PHONE_ACCELEROMETER FEATURES : [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] SRC_FOLDER : \"rapids\" # inside src/features/phone_accelerometer SRC_LANGUAGE : \"python\" # 5) PANDA provider PANDA : # 5.1) Parameters of PANDA provider of PHONE_ACCELEROMETER COMPUTE : False VALID_SENSED_MINUTES : False # 5.2) Features of PANDA provider of PHONE_ACCELEROMETER FEATURES : exertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] nonexertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] SRC_FOLDER : \"panda\" # inside src/features/phone_accelerometer SRC_LANGUAGE : \"python\"","title":"Behavioral Features Introduction"},{"location":"features/feature-introduction/#sensor-parameters","text":"Each sensor configuration section has a \u201cparameters\u201d subsection (see #2 in the example). 
These are parameters that affect different aspects of how the raw data is downloaded, and processed. The TABLE parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page.","title":"Sensor Parameters"},{"location":"features/feature-introduction/#sensor-providers","text":"Each sensor configuration section can have zero, one or more behavioral feature providers (see #3 in the example). A provider is a script created by the core RAPIDS team or other researchers that extracts behavioral features for that sensor. In this example, accelerometer has two providers: RAPIDS (see #4 ) and PANDA (see #5 ).","title":"Sensor Providers"},{"location":"features/feature-introduction/#provider-parameters","text":"Each provider has parameters that affect the computation of the behavioral features it offers (see #4.1 or #5.1 in the example). These parameters will include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page.","title":"Provider Parameters"},{"location":"features/feature-introduction/#provider-features","text":"Each provider offers a set of behavioral features (see #4.2 or #5.2 in the example). For some providers these features are grouped in an array (like those for RAPIDS provider in #4.2 ) but for others they are grouped in a collection of arrays depending on the meaning and purpose of those features (like those for PANDAS provider in #5.2 ). In either case, you can delete the features you are not interested in and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page.","title":"Provider Features"},{"location":"features/fitbit-heartrate-intraday/","text":"Fitbit Heart Rate Intraday \u00b6 Sensor parameters description for [FITBIT_HEARTRATE_INTRADAY] : Key Description [TABLE] Database table name or file path where the heart rate intraday data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). 
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange 
a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange RAPIDS provider \u00b6 Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_heartrate_intraday_raw.csv - data/raw/ { pid } /fitbit_heartrate_intraday_parsed.csv - data/raw/ { pid } /fitbit_heartrate_intraday_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_intraday_features/fitbit_heartrate_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_intraday.csv Parameters description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate intraday data, see table below Features description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description maxhr beats/mins The maximum heart rate during a time segment. minhr beats/mins The minimum heart rate during a time segment. avghr beats/mins The average heart rate during a time segment. medianhr beats/mins The median of heart rate during a time segment. modehr beats/mins The mode of heart rate during a time segment. stdhr beats/mins The standard deviation of heart rate during a time segment. diffmaxmodehr beats/mins The difference between the maximum and mode heart rate during a time segment. diffminmodehr beats/mins The difference between the mode and minimum heart rate during a time segment. entropyhr nats Shannon\u2019s entropy measurement based on heart rate during a time segment. minutesonZONE minutes Number of minutes the user\u2019s heart rate fell within each heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed.","title":"Fitbit Heart Rate Intraday"},{"location":"features/fitbit-heartrate-intraday/#fitbit-heart-rate-intraday","text":"Sensor parameters description for [FITBIT_HEARTRATE_INTRADAY] : Key Description [TABLE] Database table name or file path where the heart rate intraday data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). 
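As a rough illustration of how the features in the table above relate to the raw data (a sketch under assumptions, not the provider's implementation), the snippet below computes a few of them from data already in the PLAIN_TEXT format, assuming one row per minute. Here entropyhr is read as the Shannon entropy, in nats, of the distribution of observed heart rate values, and ZONE is a placeholder for each of the four zone names.

import numpy as np
import pandas as pd

def heartrate_intraday_features(segment: pd.DataFrame) -> dict:
    # segment: the rows of one time segment, with heartrate and heartrate_zone columns
    hr = segment["heartrate"]
    features = {"maxhr": hr.max(), "minhr": hr.min(), "avghr": hr.mean(),
                "medianhr": hr.median(), "stdhr": hr.std()}
    # Shannon entropy (nats) of the empirical distribution of heart rate values
    p = hr.value_counts(normalize=True)
    features["entropyhr"] = float(-(p * np.log(p)).sum())
    # minutes spent in each zone, assuming one-minute sampling
    # (illustrative naming; ZONE stands for the zone name in the output column)
    for zone in ["outofrange", "fatburn", "cardio", "peak"]:
        features["minuteson" + zone] = int((segment["heartrate_zone"] == zone).sum())
    return features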
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange 
a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange","title":"Fitbit Heart Rate Intraday"},{"location":"features/fitbit-heartrate-intraday/#rapids-provider","text":"Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_heartrate_intraday_raw.csv - data/raw/ { pid } /fitbit_heartrate_intraday_parsed.csv - data/raw/ { pid } /fitbit_heartrate_intraday_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_intraday_features/fitbit_heartrate_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_intraday.csv Parameters description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate intraday data, see table below Features description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description maxhr beats/mins The maximum heart rate during a time segment. minhr beats/mins The minimum heart rate during a time segment. avghr beats/mins The average heart rate during a time segment. medianhr beats/mins The median of heart rate during a time segment. modehr beats/mins The mode of heart rate during a time segment. stdhr beats/mins The standard deviation of heart rate during a time segment. diffmaxmodehr beats/mins The difference between the maximum and mode heart rate during a time segment. diffminmodehr beats/mins The difference between the mode and minimum heart rate during a time segment. entropyhr nats Shannon\u2019s entropy measurement based on heart rate during a time segment. minutesonZONE minutes Number of minutes the user\u2019s heart rate fell within each heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed.","title":"RAPIDS provider"},{"location":"features/fitbit-heartrate-summary/","text":"Fitbit Heart Rate Summary \u00b6 Sensor parameters description for [FITBIT_HEARTRATE_SUMMARY] : Key Description [TABLE] Database table name or file path where the heart rate summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). 
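As with the intraday data, it may help to see how one JSON record maps onto the PLAIN_TEXT columns before looking at the example that follows. The sketch below is illustrative only (it is not RAPIDS's parser) and assumes a complete, untruncated record like the ones in the JSON example; calories per zone come from the heartRateZones entries.

import json

def summarize_heartrate_json(fitbit_data):
    # Extracts the daily summary fields that correspond to the PLAIN_TEXT columns
    # from one JSON record.
    day = json.loads(fitbit_data)["activities-heart"][0]
    calories = {zone["name"]: zone["caloriesOut"] for zone in day["value"]["heartRateZones"]}
    return {
        "local_date_time": day["dateTime"],
        "heartrate_daily_restinghr": day["value"]["restingHeartRate"],
        "heartrate_daily_caloriesoutofrange": calories.get("Out of Range"),
        "heartrate_daily_caloriesfatburn": calories.get("Fat Burn"),
        "heartrate_daily_caloriescardio": calories.get("Cardio"),
        "heartrate_daily_caloriespeak": calories.get("Peak"),
    }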
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak 
a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0 RAPIDS provider \u00b6 Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_heartrate_summary_raw.csv - data/raw/ { pid } /fitbit_heartrate_summary_parsed.csv - data/raw/ { pid } /fitbit_heartrate_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_summary_features/fitbit_heartrate_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_summary.csv Parameters description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate summary data, see table below Features description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxrestinghr beats/mins The maximum daily resting heart rate during a time segment. minrestinghr beats/mins The minimum daily resting heart rate during a time segment. avgrestinghr beats/mins The average daily resting heart rate during a time segment. medianrestinghr beats/mins The median of daily resting heart rate during a time segment. moderestinghr beats/mins The mode of daily resting heart rate during a time segment. stdrestinghr beats/mins The standard deviation of daily resting heart rate during a time segment. diffmaxmoderestinghr beats/mins The difference between the maximum and mode daily resting heart rate during a time segment. diffminmoderestinghr beats/mins The difference between the mode and minimum daily resting heart rate during a time segment. entropyrestinghr nats Shannon\u2019s entropy measurement based on daily resting heart rate during a time segment. sumcaloriesZONE cals The total daily calories burned within heartrate_zone during a time segment. maxcaloriesZONE cals The maximum daily calories burned within heartrate_zone during a time segment. mincaloriesZONE cals The minimum daily calories burned within heartrate_zone during a time segment. avgcaloriesZONE cals The average daily calories burned within heartrate_zone during a time segment. mediancaloriesZONE cals The median of daily calories burned within heartrate_zone during a time segment. stdcaloriesZONE cals The standard deviation of daily calories burned within heartrate_zone during a time segment. entropycaloriesZONE nats Shannon\u2019s entropy measurement based on daily calories burned within heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed. Calories\u2019 accuracy depends on the users\u2019 Fitbit profile (weight, height, etc.).","title":"Fitbit Heart Rate Summary"},{"location":"features/fitbit-heartrate-summary/#fitbit-heart-rate-summary","text":"Sensor parameters description for [FITBIT_HEARTRATE_SUMMARY] : Key Description [TABLE] Database table name or file path where the heart rate summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. 
The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0","title":"Fitbit Heart Rate Summary"},{"location":"features/fitbit-heartrate-summary/#rapids-provider","text":"Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_heartrate_summary_raw.csv - data/raw/ { pid } /fitbit_heartrate_summary_parsed.csv - data/raw/ { pid } /fitbit_heartrate_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_summary_features/fitbit_heartrate_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_summary.csv Parameters description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate summary data, see table below Features description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxrestinghr beats/mins The maximum daily resting heart rate during a time segment. minrestinghr beats/mins The minimum daily resting heart rate during a time segment. avgrestinghr beats/mins The average daily resting heart rate during a time segment. medianrestinghr beats/mins The median of daily resting heart rate during a time segment. moderestinghr beats/mins The mode of daily resting heart rate during a time segment. stdrestinghr beats/mins The standard deviation of daily resting heart rate during a time segment. diffmaxmoderestinghr beats/mins The difference between the maximum and mode daily resting heart rate during a time segment. diffminmoderestinghr beats/mins The difference between the mode and minimum daily resting heart rate during a time segment. entropyrestinghr nats Shannon\u2019s entropy measurement based on daily resting heart rate during a time segment. sumcaloriesZONE cals The total daily calories burned within heartrate_zone during a time segment. maxcaloriesZONE cals The maximum daily calories burned within heartrate_zone during a time segment. mincaloriesZONE cals The minimum daily calories burned within heartrate_zone during a time segment. avgcaloriesZONE cals The average daily calories burned within heartrate_zone during a time segment. mediancaloriesZONE cals The median of daily calories burned within heartrate_zone during a time segment. stdcaloriesZONE cals The standard deviation of daily calories burned within heartrate_zone during a time segment. 
entropycaloriesZONE nats Shannon\u2019s entropy measurement based on daily calories burned within heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed. Calories\u2019 accuracy depends on the users\u2019 Fitbit profile (weight, height, etc.).","title":"RAPIDS provider"},{"location":"features/fitbit-sleep-summary/","text":"Fitbit Sleep Summary \u00b6 Sensor parameters description for [FITBIT_SLEEP_SUMMARY] : Key Description [TABLE] Database table name or file path where the sleep summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data with Fitbit\u2019s sleep API Version 1 JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 2, \u201cawakeDuration\u201d: 3, \u201cawakeningsCount\u201d: 10, \u201cdateOfSleep\u201d: \u201c2020-10-07\u201d, \u201cduration\u201d: 8100000, \u201cefficiency\u201d: 91, \u201cendTime\u201d: \u201c2020-10-07T18:10:00.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14147921940, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c15:55:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c15:56:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c15:57:00\u201d, \u201cvalue\u201d: \u201c2\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 123, \u201cminutesAwake\u201d: 12, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 8, \u201crestlessDuration\u201d: 9, \u201cstartTime\u201d: \u201c2020-10-07T15:55:00.000\u201d, \u201ctimeInBed\u201d: 135}, {\u201cawakeCount\u201d: 0, \u201cawakeDuration\u201d: 0, \u201cawakeningsCount\u201d: 1, \u201cdateOfSleep\u201d: \u201c2020-10-07\u201d, \u201cduration\u201d: 3780000, \u201cefficiency\u201d: 100, \u201cendTime\u201d: \u201c2020-10-07T10:52:30.000\u201d, \u201cisMainSleep\u201d: false, \u201clogId\u201d: 14144903977, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c09:49:00\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c09:50:00\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c09:51:00\u201d, \u201cvalue\u201d: \u201c1\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 1, \u201cminutesAsleep\u201d: 62, \u201cminutesAwake\u201d: 0, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 1, \u201crestlessDuration\u201d: 1, \u201cstartTime\u201d: \u201c2020-10-07T09:49:00.000\u201d, \u201ctimeInBed\u201d: 63}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 185, \u201ctotalSleepRecords\u201d: 2, \u201ctotalTimeInBed\u201d: 198}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 3, 
\u201cawakeDuration\u201d: 21, \u201cawakeningsCount\u201d: 16, \u201cdateOfSleep\u201d: \u201c2020-10-08\u201d, \u201cduration\u201d: 19260000, \u201cefficiency\u201d: 89, \u201cendTime\u201d: \u201c2020-10-08T06:01:30.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14150613895, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c00:40:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c00:41:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c00:42:00\u201d, \u201cvalue\u201d: \u201c3\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 275, \u201cminutesAwake\u201d: 33, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 13, \u201crestlessDuration\u201d: 25, \u201cstartTime\u201d: \u201c2020-10-08T00:40:00.000\u201d, \u201ctimeInBed\u201d: 321}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 275, \u201ctotalSleepRecords\u201d: 1, \u201ctotalTimeInBed\u201d: 321}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 1, \u201cawakeDuration\u201d: 3, \u201cawakeningsCount\u201d: 8, \u201cdateOfSleep\u201d: \u201c2020-10-09\u201d, \u201cduration\u201d: 19320000, \u201cefficiency\u201d: 96, \u201cendTime\u201d: \u201c2020-10-09T05:57:30.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14161136803, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c00:35:30\u201d, \u201cvalue\u201d: \u201c2\u201d}, {\u201cdateTime\u201d: \u201c00:36:30\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c00:37:30\u201d, \u201cvalue\u201d: \u201c1\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 309, \u201cminutesAwake\u201d: 13, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 7, \u201crestlessDuration\u201d: 10, \u201cstartTime\u201d: \u201c2020-10-09T00:35:30.000\u201d, \u201ctimeInBed\u201d: 322}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 309, \u201ctotalSleepRecords\u201d: 1, \u201ctotalTimeInBed\u201d: 322}} PLAIN_TEXT device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type count_awake duration_awake count_awakenings count_restless duration_restless a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 15:55:00 2020-10-07 18:10:00 91 0 123 12 0 135 1 classic 2 3 10 8 9 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 09:49:00 2020-10-07 10:52:30 100 1 62 0 0 63 0 classic 0 0 1 1 1 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 00:40:00 2020-10-08 06:01:30 89 0 275 33 0 321 1 classic 3 21 16 13 25 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 00:35:30 2020-10-09 05:57:30 96 0 309 13 0 322 1 classic 1 3 8 7 10 Example of the structure of source data with Fitbit\u2019s sleep API Version 1.2 JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], \u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], \u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} PLAIN_TEXT device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages RAPIDS provider \u00b6 Available time segments Only available for segments that span 1 or more complete days (e.g. 
Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_sleep_summary_raw.csv - data/raw/ { pid } /fitbit_sleep_summary_parsed.csv - data/raw/ { pid } /fitbit_sleep_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_summary_features/fitbit_sleep_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_summary.csv Parameters description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_SUMMARY features from the RAPIDS provider [SLEEP_TYPES] Types of sleep to be included in the feature extraction computation. Fitbit provides 3 types of sleep: main , nap , all . [FEATURES] Features to be computed from sleep summary data, see table below Features description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description countepisodeTYPE episodes Number of sleep episodes for a certain sleep type during a time segment. avgefficiencyTYPE scores Average sleep efficiency for a certain sleep type during a time segment. sumdurationafterwakeupTYPE minutes Total duration the user stayed in bed after waking up for a certain sleep type during a time segment. sumdurationasleepTYPE minutes Total sleep duration for a certain sleep type during a time segment. sumdurationawakeTYPE minutes Total duration the user stayed awake but still in bed for a certain sleep type during a time segment. sumdurationtofallasleepTYPE minutes Total duration the user spent to fall asleep for a certain sleep type during a time segment. sumdurationinbedTYPE minutes Total duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. avgdurationafterwakeupTYPE minutes Average duration the user stayed in bed after waking up for a certain sleep type during a time segment. avgdurationasleepTYPE minutes Average sleep duration for a certain sleep type during a time segment. avgdurationawakeTYPE minutes Average duration the user stayed awake but still in bed for a certain sleep type during a time segment. avgdurationtofallasleepTYPE minutes Average duration the user spent to fall asleep for a certain sleep type during a time segment. avgdurationinbedTYPE minutes Average duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. Assumptions/Observations There are three sleep types (TYPE): main , nap , all . The all type contains both main sleep and naps. There are two versions of Fitbit\u2019s sleep API ( version 1 and version 1.2 ), and each provides raw sleep data in a different format: Count & duration summaries . v1 contains count_awake , duration_awake , count_awakenings , count_restless , and duration_restless fields for every sleep record but v1.2 does not. API columns . Features are computed based on the values provided by Fitbit\u2019s API: efficiency , minutes_after_wakeup , minutes_asleep , minutes_awake , minutes_to_fall_asleep , minutes_in_bed , is_main_sleep and type .","title":"Fitbit Sleep Summary"},{"location":"features/fitbit-sleep-summary/#fitbit-sleep-summary","text":"Sensor parameters description for [FITBIT_SLEEP_SUMMARY] : Key Description [TABLE] Database table name or file path where the sleep summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. 
The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data with Fitbit\u2019s sleep API Version 1 JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 2, \u201cawakeDuration\u201d: 3, \u201cawakeningsCount\u201d: 10, \u201cdateOfSleep\u201d: \u201c2020-10-07\u201d, \u201cduration\u201d: 8100000, \u201cefficiency\u201d: 91, \u201cendTime\u201d: \u201c2020-10-07T18:10:00.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14147921940, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c15:55:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c15:56:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c15:57:00\u201d, \u201cvalue\u201d: \u201c2\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 123, \u201cminutesAwake\u201d: 12, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 8, \u201crestlessDuration\u201d: 9, \u201cstartTime\u201d: \u201c2020-10-07T15:55:00.000\u201d, \u201ctimeInBed\u201d: 135}, {\u201cawakeCount\u201d: 0, \u201cawakeDuration\u201d: 0, \u201cawakeningsCount\u201d: 1, \u201cdateOfSleep\u201d: \u201c2020-10-07\u201d, \u201cduration\u201d: 3780000, \u201cefficiency\u201d: 100, \u201cendTime\u201d: \u201c2020-10-07T10:52:30.000\u201d, \u201cisMainSleep\u201d: false, \u201clogId\u201d: 14144903977, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c09:49:00\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c09:50:00\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c09:51:00\u201d, \u201cvalue\u201d: \u201c1\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 1, \u201cminutesAsleep\u201d: 62, \u201cminutesAwake\u201d: 0, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 1, \u201crestlessDuration\u201d: 1, \u201cstartTime\u201d: \u201c2020-10-07T09:49:00.000\u201d, \u201ctimeInBed\u201d: 63}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 185, \u201ctotalSleepRecords\u201d: 2, \u201ctotalTimeInBed\u201d: 198}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 3, \u201cawakeDuration\u201d: 21, \u201cawakeningsCount\u201d: 16, \u201cdateOfSleep\u201d: \u201c2020-10-08\u201d, \u201cduration\u201d: 19260000, \u201cefficiency\u201d: 89, \u201cendTime\u201d: \u201c2020-10-08T06:01:30.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14150613895, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c00:40:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c00:41:00\u201d, \u201cvalue\u201d: \u201c3\u201d}, {\u201cdateTime\u201d: \u201c00:42:00\u201d, \u201cvalue\u201d: \u201c3\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 275, \u201cminutesAwake\u201d: 33, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 13, \u201crestlessDuration\u201d: 25, \u201cstartTime\u201d: 
\u201c2020-10-08T00:40:00.000\u201d, \u201ctimeInBed\u201d: 321}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 275, \u201ctotalSleepRecords\u201d: 1, \u201ctotalTimeInBed\u201d: 321}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d: [{\u201cawakeCount\u201d: 1, \u201cawakeDuration\u201d: 3, \u201cawakeningsCount\u201d: 8, \u201cdateOfSleep\u201d: \u201c2020-10-09\u201d, \u201cduration\u201d: 19320000, \u201cefficiency\u201d: 96, \u201cendTime\u201d: \u201c2020-10-09T05:57:30.000\u201d, \u201cisMainSleep\u201d: true, \u201clogId\u201d: 14161136803, \u201cminuteData\u201d: [{\u201cdateTime\u201d: \u201c00:35:30\u201d, \u201cvalue\u201d: \u201c2\u201d}, {\u201cdateTime\u201d: \u201c00:36:30\u201d, \u201cvalue\u201d: \u201c1\u201d}, {\u201cdateTime\u201d: \u201c00:37:30\u201d, \u201cvalue\u201d: \u201c1\u201d},\u2026], \u201cminutesAfterWakeup\u201d: 0, \u201cminutesAsleep\u201d: 309, \u201cminutesAwake\u201d: 13, \u201cminutesToFallAsleep\u201d: 0, \u201crestlessCount\u201d: 7, \u201crestlessDuration\u201d: 10, \u201cstartTime\u201d: \u201c2020-10-09T00:35:30.000\u201d, \u201ctimeInBed\u201d: 322}], \u201csummary\u201d: {\u201ctotalMinutesAsleep\u201d: 309, \u201ctotalSleepRecords\u201d: 1, \u201ctotalTimeInBed\u201d: 322}} PLAIN_TEXT device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type count_awake duration_awake count_awakenings count_restless duration_restless a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 15:55:00 2020-10-07 18:10:00 91 0 123 12 0 135 1 classic 2 3 10 8 9 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 09:49:00 2020-10-07 10:52:30 100 1 62 0 0 63 0 classic 0 0 1 1 1 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 00:40:00 2020-10-08 06:01:30 89 0 275 33 0 321 1 classic 3 21 16 13 25 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 00:35:30 2020-10-09 05:57:30 96 0 309 13 0 322 1 classic 1 3 8 7 10 Example of the structure of source data with Fitbit\u2019s sleep API Version 1.2 JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], \u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], \u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} PLAIN_TEXT device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages","title":"Fitbit Sleep Summary"},{"location":"features/fitbit-sleep-summary/#rapids-provider","text":"Available time segments Only available for segments that span 1 or more complete days (e.g. 
Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_sleep_summary_raw.csv - data/raw/ { pid } /fitbit_sleep_summary_parsed.csv - data/raw/ { pid } /fitbit_sleep_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_summary_features/fitbit_sleep_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_summary.csv Parameters description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_SUMMARY features from the RAPIDS provider [SLEEP_TYPES] Types of sleep to be included in the feature extraction computation. Fitbit provides 3 types of sleep: main , nap , all . [FEATURES] Features to be computed from sleep summary data, see table below Features description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description countepisodeTYPE episodes Number of sleep episodes for a certain sleep type during a time segment. avgefficiencyTYPE scores Average sleep efficiency for a certain sleep type during a time segment. sumdurationafterwakeupTYPE minutes Total duration the user stayed in bed after waking up for a certain sleep type during a time segment. sumdurationasleepTYPE minutes Total sleep duration for a certain sleep type during a time segment. sumdurationawakeTYPE minutes Total duration the user stayed awake but still in bed for a certain sleep type during a time segment. sumdurationtofallasleepTYPE minutes Total duration the user spent to fall asleep for a certain sleep type during a time segment. sumdurationinbedTYPE minutes Total duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. avgdurationafterwakeupTYPE minutes Average duration the user stayed in bed after waking up for a certain sleep type during a time segment. avgdurationasleepTYPE minutes Average sleep duration for a certain sleep type during a time segment. avgdurationawakeTYPE minutes Average duration the user stayed awake but still in bed for a certain sleep type during a time segment. avgdurationtofallasleepTYPE minutes Average duration the user spent to fall asleep for a certain sleep type during a time segment. avgdurationinbedTYPE minutes Average duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. Assumptions/Observations There are three sleep types (TYPE): main , nap , all . The all type contains both main sleep and naps. There are two versions of Fitbit\u2019s sleep API ( version 1 and version 1.2 ), and each provides raw sleep data in a different format: Count & duration summaries . v1 contains count_awake , duration_awake , count_awakenings , count_restless , and duration_restless fields for every sleep record but v1.2 does not. API columns . Features are computed based on the values provided by Fitbit\u2019s API: efficiency , minutes_after_wakeup , minutes_asleep , minutes_awake , minutes_to_fall_asleep , minutes_in_bed , is_main_sleep and type .","title":"RAPIDS provider"},{"location":"features/fitbit-steps-intraday/","text":"Fitbit Steps Intraday \u00b6 Sensor parameters description for [FITBIT_STEPS_INTRADAY] : Key Description [TABLE] Database table name or file path where the steps intraday data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. 
The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0 RAPIDS provider \u00b6 Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_steps_intraday_raw.csv - data/raw/ { pid } /fitbit_steps_intraday_parsed.csv - data/raw/ { pid } /fitbit_steps_intraday_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_steps_intraday_features/fitbit_steps_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_intraday.csv Parameters description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from steps intraday data, see table below [THRESHOLD_ACTIVE_BOUT] Every minute with Fitbit steps data wil be labelled as sedentary if its step count is below this threshold, otherwise, active . [INCLUDE_ZERO_STEP_ROWS] Whether or not to include time segments with a 0 step count during the whole day. Features description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description sumsteps steps The total step count during a time segment. maxsteps steps The maximum step count during a time segment. minsteps steps The minimum step count during a time segment. 
avgsteps steps The average step count during a time segment. stdsteps steps The standard deviation of step count during a time segment. countepisodesedentarybout bouts Number of sedentary bouts during a time segment. sumdurationsedentarybout minutes Total duration of all sedentary bouts during a time segment. maxdurationsedentarybout minutes The maximum duration of any sedentary bout during a time segment. mindurationsedentarybout minutes The minimum duration of any sedentary bout during a time segment. avgdurationsedentarybout minutes The average duration of sedentary bouts during a time segment. stddurationsedentarybout minutes The standard deviation of the duration of sedentary bouts during a time segment. countepisodeactivebout bouts Number of active bouts during a time segment. sumdurationactivebout minutes Total duration of all active bouts during a time segment. maxdurationactivebout minutes The maximum duration of any active bout during a time segment. mindurationactivebout minutes The minimum duration of any active bout during a time segment. avgdurationactivebout minutes The average duration of active bouts during a time segment. stddurationactivebout minutes The standard deviation of the duration of active bouts during a time segment. Assumptions/Observations Active and sedentary bouts . If the step count per minute is smaller than THRESHOLD_ACTIVE_BOUT (default value is 10), that minute is labelled as sedentary, otherwise, is labelled as active. Active and sedentary bouts are periods of consecutive minutes labelled as active or sedentary .","title":"Fitbit Steps Intraday"},{"location":"features/fitbit-steps-intraday/#fitbit-steps-intraday","text":"Sensor parameters description for [FITBIT_STEPS_INTRADAY] : Key Description [TABLE] Database table name or file path where the steps intraday data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). 
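The active/sedentary bout labelling described in the Assumptions/Observations above can be illustrated with a minimal sketch (pandas assumed; the DataFrame and column names are hypothetical, and this is not RAPIDS' actual implementation); the raw-format example the provider expects follows right after.

```python
import pandas as pd

# Hypothetical minute-level step counts for one time segment (one row per minute).
minutes = pd.DataFrame({"steps": [0, 2, 0, 15, 22, 30, 0, 0]})

THRESHOLD_ACTIVE_BOUT = 10  # default value described in the parameters table above

# A minute whose step count is below the threshold is sedentary, otherwise active.
minutes["label"] = (minutes["steps"] >= THRESHOLD_ACTIVE_BOUT).map({True: "active", False: "sedentary"})

# Consecutive minutes with the same label form one bout; bout features
# (countepisodeactivebout, avgdurationsedentarybout, ...) aggregate these durations.
minutes["bout_id"] = (minutes["label"] != minutes["label"].shift()).cumsum()
bouts = minutes.groupby(["bout_id", "label"]).size().rename("duration_minutes").reset_index()
print(bouts)
```

With this toy input the sketch yields a 3-minute sedentary bout, a 3-minute active bout, and a final 2-minute sedentary bout.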
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0","title":"Fitbit Steps Intraday"},{"location":"features/fitbit-steps-intraday/#rapids-provider","text":"Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_steps_intraday_raw.csv - data/raw/ { pid } /fitbit_steps_intraday_parsed.csv - data/raw/ { pid } /fitbit_steps_intraday_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_steps_intraday_features/fitbit_steps_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_intraday.csv Parameters description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from steps intraday data, see table below [THRESHOLD_ACTIVE_BOUT] Every minute with Fitbit steps data wil be labelled as sedentary if its step count is below this threshold, otherwise, active . [INCLUDE_ZERO_STEP_ROWS] Whether or not to include time segments with a 0 step count during the whole day. Features description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description sumsteps steps The total step count during a time segment. maxsteps steps The maximum step count during a time segment. minsteps steps The minimum step count during a time segment. avgsteps steps The average step count during a time segment. stdsteps steps The standard deviation of step count during a time segment. countepisodesedentarybout bouts Number of sedentary bouts during a time segment. sumdurationsedentarybout minutes Total duration of all sedentary bouts during a time segment. maxdurationsedentarybout minutes The maximum duration of any sedentary bout during a time segment. mindurationsedentarybout minutes The minimum duration of any sedentary bout during a time segment. 
avgdurationsedentarybout minutes The average duration of sedentary bouts during a time segment. stddurationsedentarybout minutes The standard deviation of the duration of sedentary bouts during a time segment. countepisodeactivebout bouts Number of active bouts during a time segment. sumdurationactivebout minutes Total duration of all active bouts during a time segment. maxdurationactivebout minutes The maximum duration of any active bout during a time segment. mindurationactivebout minutes The minimum duration of any active bout during a time segment. avgdurationactivebout minutes The average duration of active bouts during a time segment. stddurationactivebout minutes The standard deviation of the duration of active bouts during a time segment. Assumptions/Observations Active and sedentary bouts . If the step count per minute is smaller than THRESHOLD_ACTIVE_BOUT (default value is 10), that minute is labelled as sedentary, otherwise, is labelled as active. Active and sedentary bouts are periods of consecutive minutes labelled as active or sedentary .","title":"RAPIDS provider"},{"location":"features/fitbit-steps-summary/","text":"Fitbit Steps Summary \u00b6 Sensor parameters description for [FITBIT_STEPS_SUMMARY] : Key Description [TABLE] Database table name or file path where the steps summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). 
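If you only have the JSON payloads and want to produce the PLAIN_TEXT layout yourself, a minimal sketch (standard-library json only; the cell string, device_id, and the wrapping braces are assumptions for illustration) of flattening the daily activities-steps entries could look like the following; the raw-format example is shown right after.

```python
import json

# Hypothetical fitbit_data cell, wrapped in braces so it parses as one JSON object
# (the example rows below are truncated and rendered with curly quotes).
cell = '{"activities-steps": [{"dateTime": "2020-10-07", "value": "1775"}]}'
device_id = "a748ee1a-1d0b-4ae9-9074-279a2b6ba524"

payload = json.loads(cell)
# One PLAIN_TEXT row per day: device_id, local_date_time, steps.
rows = [(device_id, day["dateTime"], int(day["value"])) for day in payload["activities-steps"]]
print(rows)  # [('a748ee1a-1d0b-4ae9-9074-279a2b6ba524', '2020-10-07', 1775)]
```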
Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998 RAPIDS provider \u00b6 Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_steps_summary_raw.csv - data/raw/ { pid } /fitbit_steps_summary_parsed.csv - data/raw/ { pid } /fitbit_steps_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_steps_summary_features/fitbit_steps_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_summary.csv Parameters description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from steps summary data, see table below Features description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxsumsteps steps The maximum daily step count during a time segment. minsumsteps steps The minimum daily step count during a time segment. avgsumsteps steps The average daily step count during a time segment. mediansumsteps steps The median of daily step count during a time segment. stdsumsteps steps The standard deviation of daily step count during a time segment. Assumptions/Observations NA","title":"Fitbit Steps Summary"},{"location":"features/fitbit-steps-summary/#fitbit-steps-summary","text":"Sensor parameters description for [FITBIT_STEPS_SUMMARY] : Key Description [TABLE] Database table name or file path where the steps summary data is stored. The configuration keys in Device Data Source Configuration control whether this parameter is interpreted as table or file. The format of the column(s) containing the Fitbit sensor data can be JSON or PLAIN_TEXT . The data in JSON format is obtained directly from the Fitbit API. 
We support PLAIN_TEXT in case you already parsed your data and don\u2019t have access to your participants\u2019 Fitbit accounts anymore. If your data is in JSON format then summary and intraday data come packed together. We provide examples of the input format that RAPIDS expects, note that both examples for JSON and PLAIN_TEXT are tabular and the actual format difference comes in the fitbit_data column (we truncate the JSON example for brevity). Example of the structure of source data JSON device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} PLAIN_TEXT device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998","title":"Fitbit Steps Summary"},{"location":"features/fitbit-steps-summary/#rapids-provider","text":"Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_steps_summary_raw.csv - data/raw/ { pid } /fitbit_steps_summary_parsed.csv - data/raw/ { pid } /fitbit_steps_summary_parsed_with_datetime.csv - data/interim/ { pid } /fitbit_steps_summary_features/fitbit_steps_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_summary.csv Parameters description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from steps summary data, see table below Features description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxsumsteps steps The maximum daily step count during a time segment. minsumsteps steps The minimum daily step count during a time segment. avgsumsteps steps The average daily step count during a time segment. mediansumsteps steps The median of daily step count during a time segment. stdsumsteps steps The standard deviation of daily step count during a time segment. 
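For intuition, a minimal sketch (pandas assumed; variable and column names are hypothetical, not RAPIDS' internals) of how these statistics reduce the daily step totals that fall inside one time segment:

```python
import pandas as pd

# Hypothetical daily step totals covered by one time segment.
daily = pd.DataFrame({"sumsteps": [1775, 3201, 998]})

features = {
    "maxsumsteps": daily["sumsteps"].max(),
    "minsumsteps": daily["sumsteps"].min(),
    "avgsumsteps": daily["sumsteps"].mean(),
    "mediansumsteps": daily["sumsteps"].median(),
    "stdsumsteps": daily["sumsteps"].std(),
}
print(features)
```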
Assumptions/Observations NA","title":"RAPIDS provider"},{"location":"features/phone-accelerometer/","text":"Phone Accelerometer \u00b6 Sensor parameters description for [PHONE_ACCELEROMETER] : Key Description [TABLE] Database table where the accelerometer data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Feature Units Description maxmagnitude m/s 2 The maximum magnitude of acceleration ( \\(\\|acceleration\\| = \\sqrt{x^2 + y^2 + z^2}\\) ). minmagnitude m/s 2 The minimum magnitude of acceleration. avgmagnitude m/s 2 The average magnitude of acceleration. medianmagnitude m/s 2 The median magnitude of acceleration. stdmagnitude m/s 2 The standard deviation of acceleration. Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem. PANDA provider \u00b6 These features are based on the work by Panda et al . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the PANDA provider [FEATURES] Features to be computed for exertional and non-exertional activity episodes, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Feature Units Description sumduration minutes Total duration of all exertional or non-exertional activity episodes. maxduration minutes Longest duration of any exertional or non-exertional activity episode. minduration minutes Shortest duration of any exertional or non-exertional activity episode. avgduration minutes Average duration of any exertional or non-exertional activity episode. medianduration minutes Median duration of any exertional or non-exertional activity episode. stdduration minutes Standard deviation of the duration of all exertional or non-exertional activity episodes. Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem. 
See Panda et al for a definition of exertional and non-exertional activity episodes","title":"Phone Accelerometer"},{"location":"features/phone-accelerometer/#phone-accelerometer","text":"Sensor parameters description for [PHONE_ACCELEROMETER] : Key Description [TABLE] Database table where the accelerometer data is stored","title":"Phone Accelerometer"},{"location":"features/phone-accelerometer/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Feature Units Description maxmagnitude m/s 2 The maximum magnitude of acceleration ( \\(\\|acceleration\\| = \\sqrt{x^2 + y^2 + z^2}\\) ). minmagnitude m/s 2 The minimum magnitude of acceleration. avgmagnitude m/s 2 The average magnitude of acceleration. medianmagnitude m/s 2 The median magnitude of acceleration. stdmagnitude m/s 2 The standard deviation of acceleration. Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem.","title":"RAPIDS provider"},{"location":"features/phone-accelerometer/#panda-provider","text":"These features are based on the work by Panda et al . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the PANDA provider [FEATURES] Features to be computed for exertional and non-exertional activity episodes, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Feature Units Description sumduration minutes Total duration of all exertional or non-exertional activity episodes. maxduration minutes Longest duration of any exertional or non-exertional activity episode. minduration minutes Shortest duration of any exertional or non-exertional activity episode. avgduration minutes Average duration of any exertional or non-exertional activity episode. medianduration minutes Median duration of any exertional or non-exertional activity episode. stdduration minutes Standard deviation of the duration of all exertional or non-exertional activity episodes. Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem. 
See Panda et al for a definition of exertional and non-exertional activity episodes","title":"PANDA provider"},{"location":"features/phone-activity-recognition/","text":"Phone Activity Recognition \u00b6 Sensor parameters description for [PHONE_ACTIVITY_RECOGNITION] : Key Description [TABLE][ANDROID] Database table where the activity data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [TABLE][IOS] Database table where the activity data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same activity episode RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_activity_recognition_raw.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime_unified.csv - data/interim/ { pid } /phone_activity_recognition_episodes.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_activity_recognition_features/phone_activity_recognition_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_activity_recognition.csv Parameters description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACTIVITY_RECOGNITION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [ACTIVITY_CLASSES][STATIONARY] An array of the activity labels to be considered in the STATIONARY category choose any of still , tilting [ACTIVITY_CLASSES][MOBILE] An array of the activity labels to be considered in the MOBILE category choose any of on_foot , walking , running , on_bicycle [ACTIVITY_CLASSES][VEHICLE] An array of the activity labels to be considered in the VEHICLE category choose any of in_vehicule Features description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of episodes. mostcommonactivity activity type The most common activity type (e.g. still , on_foot , etc.). If there is a tie, the first one is chosen. countuniqueactivities activity type Number of unique activities. 
durationstationary minutes The total duration of [ACTIVITY_CLASSES][STATIONARY] episodes durationmobile minutes The total duration of [ACTIVITY_CLASSES][MOBILE] episodes of on foot, running, and on bicycle activities durationvehicle minutes The total duration of [ACTIVITY_CLASSES][VEHICLE] episodes of on vehicle activity Assumptions/Observations iOS Activity Recognition names and types are unified with Android labels: iOS Activity Name Android Activity Name Android Activity Type walking walking 7 running running 8 cycling on_bicycle 1 automotive in_vehicle 0 stationary still 3 unknown unknown 4 In AWARE, Activity Recognition data for Android and iOS are stored in two different database tables, RAPIDS automatically infers what platform each participant belongs to based on their participant file .","title":"Phone Activity Recognition"},{"location":"features/phone-activity-recognition/#phone-activity-recognition","text":"Sensor parameters description for [PHONE_ACTIVITY_RECOGNITION] : Key Description [TABLE][ANDROID] Database table where the activity data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [TABLE][IOS] Database table where the activity data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same activity episode","title":"Phone Activity Recognition"},{"location":"features/phone-activity-recognition/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_activity_recognition_raw.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime_unified.csv - data/interim/ { pid } /phone_activity_recognition_episodes.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_activity_recognition_features/phone_activity_recognition_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_activity_recognition.csv Parameters description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACTIVITY_RECOGNITION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [ACTIVITY_CLASSES][STATIONARY] An array of the activity labels to be considered in the STATIONARY category choose any of still , tilting [ACTIVITY_CLASSES][MOBILE] An array of the activity labels to be considered in the MOBILE category choose any of on_foot , walking , running , on_bicycle [ACTIVITY_CLASSES][VEHICLE] An array of the activity labels to be considered in the VEHICLE category choose any of in_vehicule Features description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of episodes. mostcommonactivity activity type The most common activity type (e.g. still , on_foot , etc.). If there is a tie, the first one is chosen. countuniqueactivities activity type Number of unique activities. 
durationstationary minutes The total duration of [ACTIVITY_CLASSES][STATIONARY] episodes durationmobile minutes The total duration of [ACTIVITY_CLASSES][MOBILE] episodes of on foot, running, and on bicycle activities durationvehicle minutes The total duration of [ACTIVITY_CLASSES][VEHICLE] episodes of on vehicle activity Assumptions/Observations iOS Activity Recognition names and types are unified with Android labels: iOS Activity Name Android Activity Name Android Activity Type walking walking 7 running running 8 cycling on_bicycle 1 automotive in_vehicle 0 stationary still 3 unknown unknown 4 In AWARE, Activity Recognition data for Android and iOS are stored in two different database tables, RAPIDS automatically infers what platform each participant belongs to based on their participant file .","title":"RAPIDS provider"},{"location":"features/phone-applications-foreground/","text":"Phone Applications Foreground \u00b6 Sensor parameters description for [PHONE_APPLICATIONS_FOREGROUND] (these parameters are used by the only provider available at the moment, RAPIDS): Key Description [TABLE] Database table where the applications foreground data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scraped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] if [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] , if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored) RAPIDS provider \u00b6 The app category (genre) catalogue used in these features was originally created by Stachl et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_applications_foreground_raw.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime_with_categories.csv - data/interim/ { pid } /phone_applications_foreground_features/phone_applications_foreground_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_applications_foreground.csv Parameters description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_APPLICATIONS_FOREGROUND features from the RAPIDS provider [FEATURES] Features to be computed, see table below [SINGLE_CATEGORIES] An array of app categories to be included in the feature extraction computation. The special keyword all represents a category with all the apps from each participant. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [MULTIPLE_CATEGORIES] An array of collections representing meta-categories (a group of categories). The key of each element is the name of the meta-category and the value is an array of member app categories. 
By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [SINGLE_APPS] An array of apps to be included in the feature extraction computation. Use their package name (e.g. com.google.android.youtube ) or the reserved keyword top1global (the most used app by a participant over the whole monitoring study) [EXCLUDED_CATEGORIES] An array of app categories to be excluded from the feature extraction computation. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [EXCLUDED_APPS] An array of apps to be excluded from the feature extraction computation. Use their package name, for example: com.google.android.youtube Features description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Feature Units Description count apps Number of times a single app or apps within a category were used (i.e. they were brought to the foreground either by tapping their icon or switching to it from another app) timeoffirstuse minutes The time in minutes between 12:00am (midnight) and the first use of a single app or apps within a category during a time_segment timeoflastuse minutes The time in minutes between 12:00am (midnight) and the last use of a single app or apps within a category during a time_segment frequencyentropy nats The entropy of the used apps within a category during a time_segment (each app is seen as a unique event, the more apps were used, the higher the entropy). This is especially relevant when computed over all apps. Entropy cannot be obtained for a single app Assumptions/Observations Features can be computed by app, by apps grouped under a single category (genre) and by multiple categories grouped together (meta-categories). For example, we can get features for Facebook (single app), for Social Network apps (a category including Facebook and other social media apps) or for Social (a meta-category formed by Social Network and Social Media Tools categories). Apps installed by default like YouTube are considered system apps on some phones. We do an exact match to exclude apps where \u201cgenre\u201d == EXCLUDED_CATEGORIES or \u201cpackage_name\u201d == EXCLUDED_APPS . We provide three ways of classifying an app within a category (genre): a) by automatically scraping its official category from the Google Play Store, b) by using the catalogue created by Stachl et al. which we provide in RAPIDS ( data/external/stachl_application_genre_catalogue.csv ), or c) by manually creating a personalized catalogue. You can choose a, b or c by modifying [APPLICATION_CATEGORIES] keys and values (see the Sensor parameters description table above).","title":"Phone Applications Foreground"},{"location":"features/phone-applications-foreground/#phone-applications-foreground","text":"Sensor parameters description for [PHONE_APPLICATIONS_FOREGROUND] (these parameters are used by the only provider available at the moment, RAPIDS): Key Description [TABLE] Database table where the applications foreground data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scraped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. 
By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] if [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] , if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored)","title":"Phone Applications Foreground"},{"location":"features/phone-applications-foreground/#rapids-provider","text":"The app category (genre) catalogue used in these features was originally created by Stachl et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_applications_foreground_raw.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime_with_categories.csv - data/interim/ { pid } /phone_applications_foreground_features/phone_applications_foreground_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_applications_foreground.csv Parameters description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_APPLICATIONS_FOREGROUND features from the RAPIDS provider [FEATURES] Features to be computed, see table below [SINGLE_CATEGORIES] An array of app categories to be included in the feature extraction computation. The special keyword all represents a category with all the apps from each participant. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [MULTIPLE_CATEGORIES] An array of collections representing meta-categories (a group of categories). The key of each element is the name of the meta-category and the value is an array of member app categories. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [SINGLE_APPS] An array of apps to be included in the feature extraction computation. Use their package name (e.g. com.google.android.youtube ) or the reserved keyword top1global (the most used app by a participant over the whole monitoring study) [EXCLUDED_CATEGORIES] An array of app categories to be excluded from the feature extraction computation. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [EXCLUDED_APPS] An array of apps to be excluded from the feature extraction computation. Use their package name, for example: com.google.android.youtube Features description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Feature Units Description count apps Number of times a single app or apps within a category were used (i.e. 
they were brought to the foreground either by tapping their icon or switching to it from another app) timeoffirstuse minutes The time in minutes between 12:00am (midnight) and the first use of a single app or apps within a category during a time_segment timeoflastuse minutes The time in minutes between 12:00am (midnight) and the last use of a single app or apps within a category during a time_segment frequencyentropy nats The entropy of the used apps within a category during a time_segment (each app is seen as a unique event, the more apps were used, the higher the entropy). This is especially relevant when computed over all apps. Entropy cannot be obtained for a single app Assumptions/Observations Features can be computed by app, by apps grouped under a single category (genre) and by multiple categories grouped together (meta-categories). For example, we can get features for Facebook (single app), for Social Network apps (a category including Facebook and other social media apps) or for Social (a meta-category formed by Social Network and Social Media Tools categories). Apps installed by default like YouTube are considered system apps on some phones. We do an exact match to exclude apps where \u201cgenre\u201d == EXCLUDED_CATEGORIES or \u201cpackage_name\u201d == EXCLUDED_APPS . We provide three ways of classifying an app within a category (genre): a) by automatically scraping its official category from the Google Play Store, b) by using the catalogue created by Stachl et al. which we provide in RAPIDS ( data/external/stachl_application_genre_catalogue.csv ), or c) by manually creating a personalized catalogue. You can choose a, b or c by modifying [APPLICATION_CATEGORIES] keys and values (see the Sensor parameters description table above).","title":"RAPIDS provider"},{"location":"features/phone-battery/","text":"Phone Battery \u00b6 Sensor parameters description for [PHONE_BATTERY] : Key Description [TABLE] Database table where the battery data is stored [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same battery charge or discharge episode RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_battery_raw.csv - data/interim/ { pid } /phone_battery_episodes.csv - data/interim/ { pid } /phone_battery_episodes_resampled.csv - data/interim/ { pid } /phone_battery_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_battery_features/phone_battery_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_battery.csv Parameters description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BATTERY features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Feature Units Description countdischarge episodes Number of discharging episodes. sumdurationdischarge minutes The total duration of all discharging episodes. countcharge episodes Number of battery charging episodes. sumdurationcharge minutes The total duration of all charging episodes. avgconsumptionrate episodes/minutes The average of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration maxconsumptionrate episodes/minutes The highest of all episodes\u2019 consumption rates. 
An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration Assumptions/Observations We convert battery data collected with iOS client v1 (autodetected because battery status 4 does not exist) to match Android battery format: we swap status 3 for 5 and 1 for 3 We group battery data into discharge or charge episodes considering any contiguous rows with consecutive reductions or increases of the battery level if they are logged within [EPISODE_THRESHOLD_BETWEEN_ROWS] minutes from each other.","title":"Phone Battery"},{"location":"features/phone-battery/#phone-battery","text":"Sensor parameters description for [PHONE_BATTERY] : Key Description [TABLE] Database table where the battery data is stored [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same battery charge or discharge episode","title":"Phone Battery"},{"location":"features/phone-battery/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_battery_raw.csv - data/interim/ { pid } /phone_battery_episodes.csv - data/interim/ { pid } /phone_battery_episodes_resampled.csv - data/interim/ { pid } /phone_battery_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_battery_features/phone_battery_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_battery.csv Parameters description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BATTERY features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Feature Units Description countdischarge episodes Number of discharging episodes. sumdurationdischarge minutes The total duration of all discharging episodes. countcharge episodes Number of battery charging episodes. sumdurationcharge minutes The total duration of all charging episodes. avgconsumptionrate episodes/minutes The average of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration maxconsumptionrate episodes/minutes The highest of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration Assumptions/Observations We convert battery data collected with iOS client v1 (autodetected because battery status 4 does not exist) to match Android battery format: we swap status 3 for 5 and 1 for 3 We group battery data into discharge or charge episodes considering any contiguous rows with consecutive reductions or increases of the battery level if they are logged within [EPISODE_THRESHOLD_BETWEEN_ROWS] minutes from each other.","title":"RAPIDS provider"},{"location":"features/phone-bluetooth/","text":"Phone Bluetooth \u00b6 Sensor parameters description for [PHONE_BLUETOOTH] : Key Description [TABLE] Database table where the bluetooth data is stored RAPIDS provider \u00b6 Warning The features of this provider are deprecated in favor of DORYAB provider (see below). 
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned devices during a time segment, a device can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique devices during a time segment as identified by their hardware ( bt_address ) address countscansmostuniquedevice scans Number of scans of the most sensed device within each time segment instance Assumptions/Observations From v0.2.0 countscans , uniquedevices , countscansmostuniquedevice were deprecated because they overlap with the respective features for ALL devices of the PHONE_BLUETOOTH DORYAB provider DORYAB provider \u00b6 This provider is adapted from the work by Doryab et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the DORYAB provider [FEATURES] Features to be computed, see table below. These features are computed for three device categories: all devices, own devices and other devices. Features description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Feature Units Description countscans scans Number of scans (rows) from the devices sensed during a time segment instance. 
The more scans a bluetooth device has the longer it remained within range of the participant\u2019s phone uniquedevices devices Number of unique bluetooth devices sensed during a time segment instance as identified by their hardware addresses ( bt_address ) meanscans scans Mean of the scans of every sensed device within each time segment instance stdscans scans Standard deviation of the scans of every sensed device within each time segment instance countscans most frequentdevice within segments scans Number of scans of the most sensed device within each time segment instance countscans least frequentdevice within segments scans Number of scans of the least sensed device within each time segment instance countscans most frequentdevice across segments scans Number of scans of the most sensed device across time segment instances of the same type countscans least frequentdevice across segments scans Number of scans of the least sensed device across time segment instances of the same type per device countscans most frequentdevice acrossdataset scans Number of scans of the most sensed device across the entire dataset of every participant countscans least frequentdevice acrossdataset scans Number of scans of the least sensed device across the entire dataset of every participant Assumptions/Observations Devices are classified as belonging to the participant ( own ) or to other people ( others ) using k-means based on the number of times and the number of days each device was detected across each participant\u2019s dataset. See Doryab et al for more details. If ownership cannot be computed because all devices were detected on only one day, they are all considered as other . Thus all and other features will be equal. The likelihood of this scenario decreases the more days of data you have. The most and least frequent devices will be the same across time segment instances and across the entire dataset when every time segment instance covers every hour of a dataset. For example, daily segments (00:00 to 23:59) fall in this category but morning segments (06:00am to 11:59am) or periodic 30-minute segments don\u2019t. Example Simplified raw bluetooth data The following is a simplified example with bluetooth data from three days and two time segments: morning and afternoon. There are two own devices: 5C836F5-487E-405F-8E28-21DBD40FA4FF detected seven times across two days and 499A1EAF-DDF1-4657-986C-EA5032104448 detected eight times on a single day. 
local_date segment bt_address own_device 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-29 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 afternoon 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-30 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2017-05-07 morning 5C5A9C41-2F68-4CEB-96D0-77DE3729B729 0 2017-05-07 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2017-05-07 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 morning 6C444841-FE64-4375-BC3F-FA410CDC0AC7 0 2017-05-07 morning 4DC7A22D-9F1F-4DEF-8576-086910AABCB5 0 2017-05-07 afternoon 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 The most and least frequent OTHER devices ( own_device == 0 ) during morning segments The most and least frequent ALL | OWN | OTHER devices are computed within each time segment instance, across time segment instances of the same type and across the entire dataset of each person. These are the most and least frequent devices for OTHER devices during morning segments. 
most frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) least frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) most frequent device across 2016-11-30 morning: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent device across 2016-11-30 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) most frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) least frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) most frequent across morning segments: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent across morning segments: '6C444841-FE64-4375-BC3F-FA410CDC0AC7' (when tied, the first occurrence is chosen) most frequent across dataset: '499A1EAF-DDF1-4657-986C-EA5032104448' (only taking into account \"morning\" segments) least frequent across dataset: '4DC7A22D-9F1F-4DEF-8576-086910AABCB5' (when tied, the first occurrence is chosen) Bluetooth features for OTHER devices and morning segments For brevity we only show the following features for morning segments: OTHER : DEVICES : [ \"countscans\" , \"uniquedevices\" , \"meanscans\" , \"stdscans\" ] SCANS_MOST_FREQUENT_DEVICE : [ \"withinsegments\" , \"acrosssegments\" , \"acrossdataset\" ] Note that countscansmostfrequentdeviceacrossdatasetothers is all 0 s because 499A1EAF-DDF1-4657-986C-EA5032104448 is excluded from the count as it is labelled as an own device (not other ). local_segment countscansothers uniquedevicesothers meanscansothers stdscansothers countscansmostfrequentdevicewithinsegmentsothers countscansmostfrequentdeviceacrosssegmentsothers countscansmostfrequentdeviceacrossdatasetothers 2016-11-29-morning 1 1 1.000000 NaN 1 0.0 0.0 2016-11-30-morning 4 3 1.333333 0.57735 2 2.0 2.0 2017-05-07-morning 5 5 1.000000 0.00000 1 1.0 1.0","title":"Phone Bluetooth"},{"location":"features/phone-bluetooth/#phone-bluetooth","text":"Sensor parameters description for [PHONE_BLUETOOTH] : Key Description [TABLE] Database table where the bluetooth data is stored","title":"Phone Bluetooth"},{"location":"features/phone-bluetooth/#rapids-provider","text":"Warning The features of this provider are deprecated in favor of DORYAB provider (see below). 
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned devices during a time segment, a device can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique devices during a time segment as identified by their hardware ( bt_address ) address countscansmostuniquedevice scans Number of scans of the most sensed device within each time segment instance Assumptions/Observations From v0.2.0 countscans , uniquedevices , countscansmostuniquedevice were deprecated because they overlap with the respective features for ALL devices of the PHONE_BLUETOOTH DORYAB provider","title":"RAPIDS provider"},{"location":"features/phone-bluetooth/#doryab-provider","text":"This provider is adapted from the work by Doryab et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the DORYAB provider [FEATURES] Features to be computed, see table below. These features are computed for three device categories: all devices, own devices and other devices. Features description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Feature Units Description countscans scans Number of scans (rows) from the devices sensed during a time segment instance. 
The more scans a bluetooth device has the longer it remained within range of the participant\u2019s phone uniquedevices devices Number of unique bluetooth devices sensed during a time segment instance as identified by their hardware addresses ( bt_address ) meanscans scans Mean of the scans of every sensed device within each time segment instance stdscans scans Standard deviation of the scans of every sensed device within each time segment instance countscans most frequentdevice within segments scans Number of scans of the most sensed device within each time segment instance countscans least frequentdevice within segments scans Number of scans of the least sensed device within each time segment instance countscans most frequentdevice across segments scans Number of scans of the most sensed device across time segment instances of the same type countscans least frequentdevice across segments scans Number of scans of the least sensed device across time segment instances of the same type per device countscans most frequentdevice acrossdataset scans Number of scans of the most sensed device across the entire dataset of every participant countscans least frequentdevice acrossdataset scans Number of scans of the least sensed device across the entire dataset of every participant Assumptions/Observations Devices are classified as belonging to the participant ( own ) or to other people ( others ) using k-means based on the number of times and the number of days each device was detected across each participant\u2019s dataset. See Doryab et al for more details. If ownership cannot be computed because all devices were detected on only one day, they are all considered as other . Thus all and other features will be equal. The likelihood of this scenario decreases the more days of data you have. The most and least frequent devices will be the same across time segment instances and across the entire dataset when every time segment instance covers every hour of a dataset. For example, daily segments (00:00 to 23:59) fall in this category but morning segments (06:00am to 11:59am) or periodic 30-minute segments don\u2019t. Example Simplified raw bluetooth data The following is a simplified example with bluetooth data from three days and two time segments: morning and afternoon. There are two own devices: 5C836F5-487E-405F-8E28-21DBD40FA4FF detected seven times across two days and 499A1EAF-DDF1-4657-986C-EA5032104448 detected eight times on a single day. 
local_date segment bt_address own_device 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-29 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 afternoon 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-30 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2017-05-07 morning 5C5A9C41-2F68-4CEB-96D0-77DE3729B729 0 2017-05-07 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2017-05-07 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 morning 6C444841-FE64-4375-BC3F-FA410CDC0AC7 0 2017-05-07 morning 4DC7A22D-9F1F-4DEF-8576-086910AABCB5 0 2017-05-07 afternoon 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 The most and least frequent OTHER devices ( own_device == 0 ) during morning segments The most and least frequent ALL | OWN | OTHER devices are computed within each time segment instance, across time segment instances of the same type and across the entire dataset of each person. These are the most and least frequent devices for OTHER devices during morning segments. 
most frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) least frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) most frequent device across 2016-11-30 morning: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent device across 2016-11-30 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) most frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) least frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurrence is chosen) most frequent across morning segments: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent across morning segments: '6C444841-FE64-4375-BC3F-FA410CDC0AC7' (when tied, the first occurrence is chosen) most frequent across dataset: '499A1EAF-DDF1-4657-986C-EA5032104448' (only taking into account \"morning\" segments) least frequent across dataset: '4DC7A22D-9F1F-4DEF-8576-086910AABCB5' (when tied, the first occurrence is chosen) Bluetooth features for OTHER devices and morning segments For brevity we only show the following features for morning segments: OTHER : DEVICES : [ \"countscans\" , \"uniquedevices\" , \"meanscans\" , \"stdscans\" ] SCANS_MOST_FREQUENT_DEVICE : [ \"withinsegments\" , \"acrosssegments\" , \"acrossdataset\" ] Note that countscansmostfrequentdeviceacrossdatasetothers is all 0 s because 499A1EAF-DDF1-4657-986C-EA5032104448 is excluded from the count as it is labelled as an own device (not other ). local_segment countscansothers uniquedevicesothers meanscansothers stdscansothers countscansmostfrequentdevicewithinsegmentsothers countscansmostfrequentdeviceacrosssegmentsothers countscansmostfrequentdeviceacrossdatasetothers 2016-11-29-morning 1 1 1.000000 NaN 1 0.0 0.0 2016-11-30-morning 4 3 1.333333 0.57735 2 2.0 2.0 2017-05-07-morning 5 5 1.000000 0.00000 1 1.0 1.0","title":"DORYAB provider"},{"location":"features/phone-calls/","text":"Phone Calls \u00b6 Sensor parameters description for [PHONE_CALLS] : Key Description [TABLE] Database table where the calls data is stored RAPIDS Provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_calls_raw.csv - data/raw/ { pid } /phone_calls_with_datetime.csv - data/raw/ { pid } /phone_calls_with_datetime_unified.csv - data/interim/ { pid } /phone_calls_features/phone_calls_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_calls.csv Parameters description for [PHONE_CALLS][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CALLS features from the RAPIDS provider [CALL_TYPES] The particular call_type that will be analyzed. The options for this parameter are incoming, outgoing or missed. [FEATURES] Features to be computed for outgoing , incoming , and missed calls. Note that the same features are available for both incoming and outgoing calls, while missed calls have their own set of features. See the tables below. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] incoming and outgoing calls: Feature Units Description count calls Number of calls of a particular call_type that occurred during a particular time_segment .
distinctcontacts contacts Number of distinct contacts that are associated with a particular call_type for a particular time_segment meanduration seconds The mean duration of all calls of a particular call_type during a particular time_segment . sumduration seconds The sum of the duration of all calls of a particular call_type during a particular time_segment . minduration seconds The duration of the shortest call of a particular call_type during a particular time_segment . maxduration seconds The duration of the longest call of a particular call_type during a particular time_segment . stdduration seconds The standard deviation of the duration of all the calls of a particular call_type during a particular time_segment . modeduration seconds The mode of the duration of all the calls of a particular call_type during a particular time_segment . entropyduration nats The estimate of the Shannon entropy for the duration of all the calls of a particular call_type during a particular time_segment . timefirstcall minutes The time in minutes between 12:00am (midnight) and the first call of call_type . timelastcall minutes The time in minutes between 12:00am (midnight) and the last call of call_type . countmostfrequentcontact calls The number of calls of a particular call_type during a particular time_segment of the most frequent contact throughout the monitored period. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] missed calls: Feature Units Description count calls Number of missed calls that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with missed calls for a particular time_segment timefirstcall minutes The time in minutes from 12:00am (midnight) that the first missed call occurred. timelastcall minutes The time in minutes from 12:00am (midnight) that the last missed call occurred. countmostfrequentcontact calls The number of missed calls during a particular time_segment of the most frequent contact throughout the monitored period. Assumptions/Observations Traces for iOS calls are unique even for the same contact calling a participant more than once, which renders countmostfrequentcontact meaningless and distinctcontacts equal to the total number of traces. [CALL_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [CALL_TYPES] outgoing matches the [FEATURES] key outgoing . iOS calls data is transformed to match Android calls data format. See our algorithm","title":"Phone Calls"},{"location":"features/phone-calls/#phone-calls","text":"Sensor parameters description for [PHONE_CALLS] : Key Description [TABLE] Database table where the calls data is stored","title":"Phone Calls"},{"location":"features/phone-calls/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_calls_raw.csv - data/raw/ { pid } /phone_calls_with_datetime.csv - data/raw/ { pid } /phone_calls_with_datetime_unified.csv - data/interim/ { pid } /phone_calls_features/phone_calls_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_calls.csv Parameters description for [PHONE_CALLS][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CALLS features from the RAPIDS provider [CALL_TYPES] The particular call_type that will be analyzed. The options for this parameter are incoming, outgoing or missed. [FEATURES] Features to be computed for outgoing , incoming , and missed calls.
Note that the same features are available for both incoming and outgoing calls, while missed calls have their own set of features. See the tables below. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] incoming and outgoing calls: Feature Units Description count calls Number of calls of a particular call_type that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular call_type for a particular time_segment meanduration seconds The mean duration of all calls of a particular call_type during a particular time_segment . sumduration seconds The sum of the duration of all calls of a particular call_type during a particular time_segment . minduration seconds The duration of the shortest call of a particular call_type during a particular time_segment . maxduration seconds The duration of the longest call of a particular call_type during a particular time_segment . stdduration seconds The standard deviation of the duration of all the calls of a particular call_type during a particular time_segment . modeduration seconds The mode of the duration of all the calls of a particular call_type during a particular time_segment . entropyduration nats The estimate of the Shannon entropy for the duration of all the calls of a particular call_type during a particular time_segment . timefirstcall minutes The time in minutes between 12:00am (midnight) and the first call of call_type . timelastcall minutes The time in minutes between 12:00am (midnight) and the last call of call_type . countmostfrequentcontact calls The number of calls of a particular call_type during a particular time_segment of the most frequent contact throughout the monitored period. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] missed calls: Feature Units Description count calls Number of missed calls that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with missed calls for a particular time_segment timefirstcall minutes The time in minutes from 12:00am (midnight) that the first missed call occurred. timelastcall minutes The time in minutes from 12:00am (midnight) that the last missed call occurred. countmostfrequentcontact calls The number of missed calls during a particular time_segment of the most frequent contact throughout the monitored period. Assumptions/Observations Traces for iOS calls are unique even for the same contact calling a participant more than once, which renders countmostfrequentcontact meaningless and distinctcontacts equal to the total number of traces. [CALL_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [CALL_TYPES] outgoing matches the [FEATURES] key outgoing . iOS calls data is transformed to match Android calls data format.
See our algorithm","title":"RAPIDS Provider"},{"location":"features/phone-conversation/","text":"Phone Conversation \u00b6 Sensor parameters description for [PHONE_CONVERSATION] : Key Description [TABLE][ANDROID] Database table where the conversation data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [TABLE][IOS] Database table where the conversation data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_conversation_raw.csv - data/raw/ { pid } /phone_conversation_with_datetime.csv - data/raw/ { pid } /phone_conversation_with_datetime_unified.csv - data/interim/ { pid } /phone_conversation_features/phone_conversation_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_conversation.csv Parameters description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CONVERSATION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [RECORDING_MINUTES] Minutes the plugin was recording audio (default 1 min) [PAUSED_MINUTES] Minutes the plugin was NOT recording audio (default 3 min) Features description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Feature Units Description minutessilence minutes Minutes labeled as silence minutesnoise minutes Minutes labeled as noise minutesvoice minutes Minutes labeled as voice minutesunknown minutes Minutes labeled as unknown sumconversationduration minutes Total duration of all conversations maxconversationduration minutes Longest duration of all conversations minconversationduration minutes Shortest duration of all conversations avgconversationduration minutes Average duration of all conversations sdconversationduration minutes Standard Deviation of the duration of all conversations timefirstconversation minutes Minutes since midnight when the first conversation for a time segment was detected timelastconversation minutes Minutes since midnight when the last conversation for a time segment was detected noisesumenergy L2-norm Sum of all energy values when inference is noise noiseavgenergy L2-norm Average of all energy values when inference is noise noisesdenergy L2-norm Standard Deviation of all energy values when inference is noise noiseminenergy L2-norm Minimum of all energy values when inference is noise noisemaxenergy L2-norm Maximum of all energy values when inference is noise voicesumenergy L2-norm Sum of all energy values when inference is voice voiceavgenergy L2-norm Average of all energy values when inference is voice voicesdenergy L2-norm Standard Deviation of all energy values when inference is voice voiceminenergy L2-norm Minimum of all energy values when inference is voice voicemaxenergy L2-norm Maximum of all energy values when inference is voice silencesensedfraction - Ratio between minutessilence and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) noisesensedfraction - Ratio between minutesnoise and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) voicesensedfraction - Ratio between minutesvoice and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) unknownsensedfraction - Ratio between minutesunknown and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) silenceexpectedfraction - 
Ratio between minutessilence and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) noiseexpectedfraction - Ratio between minutesnoise and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) voiceexpectedfraction - Ratio between minutesvoice and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) unknownexpectedfraction - Ratio between minutesunknown and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) Assumptions/Observations The timestamp of conversation rows in iOS is in seconds, so we convert it to milliseconds to match Android\u2019s format","title":"Phone Conversation"},{"location":"features/phone-conversation/#phone-conversation","text":"Sensor parameters description for [PHONE_CONVERSATION] : Key Description [TABLE][ANDROID] Database table where the conversation data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [TABLE][IOS] Database table where the conversation data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS)","title":"Phone Conversation"},{"location":"features/phone-conversation/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_conversation_raw.csv - data/raw/ { pid } /phone_conversation_with_datetime.csv - data/raw/ { pid } /phone_conversation_with_datetime_unified.csv - data/interim/ { pid } /phone_conversation_features/phone_conversation_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_conversation.csv Parameters description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CONVERSATION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [RECORDING_MINUTES] Minutes the plugin was recording audio (default 1 min) [PAUSED_MINUTES] Minutes the plugin was NOT recording audio (default 3 min) Features description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Feature Units Description minutessilence minutes Minutes labeled as silence minutesnoise minutes Minutes labeled as noise minutesvoice minutes Minutes labeled as voice minutesunknown minutes Minutes labeled as unknown sumconversationduration minutes Total duration of all conversations maxconversationduration minutes Longest duration of all conversations minconversationduration minutes Shortest duration of all conversations avgconversationduration minutes Average duration of all conversations sdconversationduration minutes Standard Deviation of the duration of all conversations timefirstconversation minutes Minutes since midnight when the first conversation for a time segment was detected timelastconversation minutes Minutes since midnight when the last conversation for a time segment was detected noisesumenergy L2-norm Sum of all energy values when inference is noise noiseavgenergy L2-norm Average of all energy values when inference is noise noisesdenergy L2-norm Standard Deviation of all energy values when inference is noise noiseminenergy L2-norm Minimum of all energy values when inference
is noise noisemaxenergy L2-norm Maximum of all energy values when inference is noise voicesumenergy L2-norm Sum of all energy values when inference is voice voiceavgenergy L2-norm Average of all energy values when inference is voice voicesdenergy L2-norm Standard Deviation of all energy values when inference is voice voiceminenergy L2-norm Minimum of all energy values when inference is voice voicemaxenergy L2-norm Maximum of all energy values when inference is voice silencesensedfraction - Ratio between minutessilence and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) noisesensedfraction - Ratio between minutesnoise and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) voicesensedfraction - Ratio between minutesvoice and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) unknownsensedfraction - Ratio between minutesunknown and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) silenceexpectedfraction - Ratio between minutessilence and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) noiseexpectedfraction - Ratio between minutesnoise and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) voiceexpectedfraction - Ratio between minutesvoice and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) unknownexpectedfraction - Ratio between minutesunknown and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / (recordingMinutes + pausedMinutes)) Assumptions/Observations The timestamp of conversation rows in iOS is in seconds, so we convert it to milliseconds to match Android\u2019s format","title":"RAPIDS provider"},{"location":"features/phone-data-yield/","text":"Phone Data Yield \u00b6 This is a combinatorial sensor, which means that we use the data from multiple sensors to extract data yield features. Data yield features can be used to remove rows ( time segments ) that do not contain enough data. You should decide what your \u201cenough\u201d threshold is depending on the type of sensors you collected (frequency vs event based, e.g. accelerometer vs calls), the length of your study, and the rates of missing data that your analysis could handle. Why is data yield important? Imagine that you want to extract PHONE_CALL features on daily segments ( 00:00 to 23:59 ). Let\u2019s say that on day 1 the phone logged 10 calls and 23 hours of data from other sensors and on day 2 the phone logged 10 calls and only 2 hours of data from other sensors. It\u2019s more likely that other calls were placed on the 22 hours of data that you didn\u2019t log on day 2 than on the 1 hour of data you didn\u2019t log on day 1, and so including day 2 in your analysis could bias your results. Sensor parameters description for [PHONE_DATA_YIELD] : Key Description [SENSORS] One or more phone sensor config keys (e.g. PHONE_MESSAGE ). The more keys you include, the more accurately RAPIDS can approximate the time a smartphone was sensing data. The supported phone sensors you can include in this list are outlined below ( do NOT include Fitbit sensors ).
Supported phone sensors for [PHONE_DATA_YIELD][SENSORS] PHONE_ACCELEROMETER PHONE_ACTIVITY_RECOGNITION PHONE_APPLICATIONS_FOREGROUND PHONE_BATTERY PHONE_BLUETOOTH PHONE_CALLS PHONE_CONVERSATION PHONE_MESSAGES PHONE_LIGHT PHONE_LOCATIONS PHONE_SCREEN PHONE_WIFI_VISIBLE PHONE_WIFI_CONNECTED RAPIDS provider \u00b6 Before explaining the data yield features, let\u2019s define the following relevant concepts: A valid minute is any 60 second window when any phone sensor logged at least 1 row of data. A valid hour is any 60 minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] . The timestamps of all sensors are concatenated and then grouped per time segment. Minute and hour windows are created from the beginning of each time segment instance and these windows are marked as valid based on the definitions above. The duration of each time segment is taken into account to compute the features described below. Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } / { sensor } _raw.csv # one for every [PHONE_DATA_YIELD][SENSORS] - data/interim/ { pid } /phone_yielded_timestamps.csv - data/interim/ { pid } /phone_yielded_timestamps_with_datetime.csv - data/interim/ { pid } /phone_data_yield_features/phone_data_yield_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_data_yield.csv Parameters description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_DATA_YIELD features from the RAPIDS provider [FEATURES] Features to be computed, see table below [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The proportion [0.0, 1.0] of valid minutes in a 60-minute window necessary to flag that window as valid. Features description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Feature Units Description ratiovalidyieldedminutes - The ratio between the number of valid minutes and the duration in minutes of a time segment. ratiovalidyieldedhours - The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour, this feature will always be 1. Assumptions/Observations We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together. For example, let\u2019s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur: (a) the 12 missing hours are from the beginning of the segment, or (b) 30 minutes could be missing from every hour (24 * 30 minutes = 12 hours). ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b ). In other words, sensed data for b is more evenly spread compared to a .","title":"Phone Data Yield"},{"location":"features/phone-data-yield/#phone-data-yield","text":"This is a combinatorial sensor, which means that we use the data from multiple sensors to extract data yield features. Data yield features can be used to remove rows ( time segments ) that do not contain enough data.
You should decide what your \u201cenough\u201d threshold is depending on the type of sensors you collected (frequency vs event based, e.g. accelerometer vs calls), the length of your study, and the rates of missing data that your analysis could handle. Why is data yield important? Imagine that you want to extract PHONE_CALL features on daily segments ( 00:00 to 23:59 ). Let\u2019s say that on day 1 the phone logged 10 calls and 23 hours of data from other sensors and on day 2 the phone logged 10 calls and only 2 hours of data from other sensors. It\u2019s more likely that other calls were placed on the 22 hours of data that you didn\u2019t log on day 2 than on the 1 hour of data you didn\u2019t log on day 1, and so including day 2 in your analysis could bias your results. Sensor parameters description for [PHONE_DATA_YIELD] : Key Description [SENSORS] One or more phone sensor config keys (e.g. PHONE_MESSAGE ). The more keys you include, the more accurately RAPIDS can approximate the time a smartphone was sensing data. The supported phone sensors you can include in this list are outlined below ( do NOT include Fitbit sensors ). Supported phone sensors for [PHONE_DATA_YIELD][SENSORS] PHONE_ACCELEROMETER PHONE_ACTIVITY_RECOGNITION PHONE_APPLICATIONS_FOREGROUND PHONE_BATTERY PHONE_BLUETOOTH PHONE_CALLS PHONE_CONVERSATION PHONE_MESSAGES PHONE_LIGHT PHONE_LOCATIONS PHONE_SCREEN PHONE_WIFI_VISIBLE PHONE_WIFI_CONNECTED","title":"Phone Data Yield"},{"location":"features/phone-data-yield/#rapids-provider","text":"Before explaining the data yield features, let\u2019s define the following relevant concepts: A valid minute is any 60 second window when any phone sensor logged at least 1 row of data. A valid hour is any 60 minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] . The timestamps of all sensors are concatenated and then grouped per time segment. Minute and hour windows are created from the beginning of each time segment instance and these windows are marked as valid based on the definitions above. The duration of each time segment is taken into account to compute the features described below. Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } / { sensor } _raw.csv # one for every [PHONE_DATA_YIELD][SENSORS] - data/interim/ { pid } /phone_yielded_timestamps.csv - data/interim/ { pid } /phone_yielded_timestamps_with_datetime.csv - data/interim/ { pid } /phone_data_yield_features/phone_data_yield_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_data_yield.csv Parameters description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_DATA_YIELD features from the RAPIDS provider [FEATURES] Features to be computed, see table below [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The proportion [0.0, 1.0] of valid minutes in a 60-minute window necessary to flag that window as valid. Features description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Feature Units Description ratiovalidyieldedminutes - The ratio between the number of valid minutes and the duration in minutes of a time segment. ratiovalidyieldedhours - The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour, this feature will always be 1.
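The two ratios above can be sketched with pandas. This is a simplified illustration under stated assumptions, not the RAPIDS source code: it receives the epoch timestamps (in milliseconds) of every row logged by the sensors in [PHONE_DATA_YIELD][SENSORS] for a single time segment instance, and for simplicity it bins minutes on epoch-minute boundaries rather than from the start of the segment.

import pandas as pd

# Hedged sketch of the data yield ratios described above (not the RAPIDS implementation).
# 'timestamps' holds epoch milliseconds of every sensor row inside one time segment instance.
def ratio_valid_yielded_minutes(timestamps: pd.Series, segment_minutes: float) -> float:
    valid_minutes = (timestamps // 60000).nunique()  # distinct 60-second windows with at least 1 row
    return min(1.0, valid_minutes / segment_minutes)

def ratio_valid_yielded_hours(timestamps: pd.Series, segment_hours: float, minute_ratio_threshold: float) -> float:
    minutes = (timestamps // 60000).drop_duplicates()            # the valid minutes
    valid_minutes_per_hour = minutes.groupby(minutes // 60).size()
    valid_hours = (valid_minutes_per_hour / 60 >= minute_ratio_threshold).sum()
    return min(1.0, valid_hours / segment_hours)

For instance, with [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] set to 0.5, a 60-minute window needs at least 30 valid minutes to count as a valid hour in this sketch.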
Assumptions/Observations We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together. For example, let\u2019s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur: (a) the 12 missing hours are from the beginning of the segment, or (b) 30 minutes could be missing from every hour (24 * 30 minutes = 12 hours). ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b ). In other words, sensed data for b is more evenly spread compared to a .","title":"RAPIDS provider"},{"location":"features/phone-light/","text":"Phone Light \u00b6 Sensor parameters description for [PHONE_LIGHT] : Key Description [TABLE] Database table where the light data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_light_raw.csv - data/raw/ { pid } /phone_light_with_datetime.csv - data/interim/ { pid } /phone_light_features/phone_light_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_light.csv Parameters description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_LIGHT features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of light sensor rows recorded. maxlux lux The maximum ambient luminance. minlux lux The minimum ambient luminance. avglux lux The average ambient luminance. medianlux lux The median ambient luminance. stdlux lux The standard deviation of ambient luminance.
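Because these are plain per-segment aggregations of the lux column, they can be approximated with a short pandas sketch. The column names below (local_segment, double_light_lux) are assumptions based on the AWARE schema, and this is an illustration rather than the RAPIDS source:

import pandas as pd

# Illustrative sketch of the PHONE_LIGHT RAPIDS features listed above (assumed column names).
def light_features(light: pd.DataFrame) -> pd.DataFrame:
    lux = light.groupby('local_segment')['double_light_lux']
    return pd.DataFrame({
        'count': lux.size(),      # number of light sensor rows per time segment
        'maxlux': lux.max(),
        'minlux': lux.min(),
        'avglux': lux.mean(),
        'medianlux': lux.median(),
        'stdlux': lux.std(),
    })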
Assumptions/Observations NA","title":"RAPIDS provider"},{"location":"features/phone-locations/","text":"Phone Locations \u00b6 Sensor parameters description for [PHONE_LOCATIONS] : Key Description [TABLE] Database table where the location data is stored [LOCATIONS_TO_USE] Type of location data to use, one of ALL , GPS or FUSED_RESAMPLED . This filter is based on the provider column of the AWARE locations table, ALL includes every row, GPS only includes rows where provider is gps, and FUSED_RESAMPLED only includes rows where provider is fused after being resampled. [FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD] if FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row will be resampled to the next valid timestamp (see the Assumptions/Observations below) only if the time difference between them is less than or equal to this threshold (in minutes). [FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION] if FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row will be resampled at most for this long (in minutes) Assumptions/Observations Types of location data to use AWARE Android and iOS clients can collect location coordinates through the phone's GPS, the network cellular towers around the phone or Google's fused location API. If you want to use only the GPS provider, set [LOCATIONS_TO_USE] to GPS ; if you want to use all providers (not recommended due to the difference in accuracy), set [LOCATIONS_TO_USE] to ALL ; if your AWARE client was configured to use fused location only or you want to focus only on this provider, set [LOCATIONS_TO_USE] to FUSED_RESAMPLED . FUSED_RESAMPLED takes the original fused location coordinates and replicates each pair forward in time as long as the phone was sensing data as indicated by the joined timestamps of [PHONE_DATA_YIELD][SENSORS] ; this is done because Google's API only logs a new location coordinate pair when it is sufficiently different in time or space from the previous one. There are two parameters associated with resampling fused location. FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD (in minutes, default 30) controls the maximum gap between any two coordinate pairs to replicate the last known pair (for example, if participant A's phone did not collect data between 10:30am and 10:50am and between 11:05am and 11:40am, the last known coordinate pair will be replicated during the first period but not the second; in other words, we assume that we can no longer guarantee the participant stayed at the last known location if the phone did not sense data for more than 30 minutes). FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION (in minutes, default 720 or 12 hours) stops the last known fused location from being replicated longer than this threshold even if the phone was sensing data continuously (for example, participant A went home at 9pm and their phone was sensing data without gaps until 11am the next morning, the last known location will only be replicated until 9am). If you have suggestions to modify or improve this resampling, let us know. BARNETT provider \u00b6 These features are based on the original open-source implementation by Barnett et al and some features created by Canzian et al .
Available time segments and platforms Available only for segments that start at 00:00:00 and end at 23:59:59 of the same day (daily segments) Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the BARNETT provider [FEATURES] Features to be computed, see table below [ACCURACY_LIMIT] An integer in meters, any location rows with an accuracy higher than this will be dropped. This number means there\u2019s a 68% probability the true location is within this radius [TIMEZONE] Timezone where the location data was collected. By default points to the one defined in the Configuration [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes, the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. Features description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] adapted from Beiwe Summary Statistics : Feature Units Description hometime minutes Time at home. Time spent at home in minutes. Home is the most visited significant location between 8 pm and 8 am including any pauses within a 200-meter radius. disttravelled meters Total distance travelled over a day (flights). rog meters The Radius of Gyration (rog) is a measure in meters of the area covered by a person over a day. A centroid is calculated for all the places (pauses) visited during a day and a weighted distance between all the places and that centroid is computed. The weights are proportional to the time spent in each place. maxdiam meters The maximum diameter is the largest distance between any two pauses. maxhomedist meters The maximum distance from home in meters. siglocsvisited locations The number of significant locations visited during the day. Significant locations are computed using k-means clustering over pauses found in the whole monitoring period. The number of clusters is found iterating k from 1 to 200 stopping until the centroids of two significant locations are within 400 meters of one another. avgflightlen meters Mean length of all flights. stdflightlen meters Standard deviation of the length of all flights. avgflightdur seconds Mean duration of all flights. stdflightdur seconds The standard deviation of the duration of all flights. probpause - The fraction of a day spent in a pause (as opposed to a flight) siglocentropy nats Shannon\u2019s entropy measurement based on the proportion of time spent at each significant location visited during a day. circdnrtn - A continuous metric quantifying a person\u2019s circadian routine that can take any value between 0 and 1, where 0 represents a daily routine completely different from any other sensed days and 1 a routine the same as every other sensed day. wkenddayrtn - Same as circdnrtn but computed separately for weekends and weekdays. 
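As a rough illustration of the rog feature defined above, one common formulation is a time-weighted centroid of the day's pauses followed by a weighted root-mean-square distance to that centroid. The sketch below assumes that formulation and is not Barnett et al's implementation; the haversine helper and the minutes_spent weights are illustrative assumptions.

import numpy as np

# Hedged sketch of a radius of gyration (rog) over a day's pauses; assumes a
# time-weighted centroid and a weighted RMS distance, which may differ in detail
# from the Barnett et al library used by RAPIDS.
def haversine_meters(lat1, lon1, lat2, lon2):
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    a = np.sin((lat2 - lat1) / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371000 * np.arcsin(np.sqrt(a))

def radius_of_gyration(pause_lats, pause_lons, minutes_spent):
    weights = np.asarray(minutes_spent, dtype=float) / np.sum(minutes_spent)
    center_lat = np.average(pause_lats, weights=weights)   # time-weighted centroid
    center_lon = np.average(pause_lons, weights=weights)
    distances = haversine_meters(np.asarray(pause_lats), np.asarray(pause_lons), center_lat, center_lon)
    return float(np.sqrt(np.sum(weights * distances ** 2)))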
Assumptions/Observations Barnett et al's features These features are based on a Pause-Flight model. A pause is defined as a mobility trace (location pings) within a certain duration and distance (by default 300 seconds and 60 meters). A flight is any mobility trace between two pauses. Data is resampled and imputed before the features are computed. See Barnett et al for more information. In RAPIDS we only expose two parameters for these features (timezone and accuracy limit). You can change other parameters in src/features/phone_locations/barnett/library/MobilityFeatures.R . Significant Locations Significant locations are determined using K-means clustering on pauses longer than 10 minutes. The number of clusters (K) is increased until no two clusters are within 400 meters from each other. After this, pauses within a certain range of a cluster (200 meters by default) will count as a visit to that significant location. This description was adapted from the Supplementary Materials of Barnett et al . The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al . DORYAB provider \u00b6 These features are based on the original implementation by Doryab et al. . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the DORYAB provider [FEATURES] Features to be computed, see table below [DBSCAN_EPS] The maximum distance in meters between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. [DBSCAN_MINSAMPLES] The number of samples (or total weight) in a neighborhood for a point to be considered as a core point of a cluster. This includes the point itself. [THRESHOLD_STATIC] The threshold value in km/hr which labels a row as Static or Moving. [MAXIMUM_GAP_ALLOWED] The maximum gap (in seconds) allowed between any two consecutive rows for them to be considered part of the same displacement. If this threshold is too high, it can throw speed and distance calculations off for periods when the phone was not sensing. [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes, the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. [SAMPLING_FREQUENCY] Expected time difference between any two location rows in minutes. If set to 0 , the sampling frequency will be inferred automatically as the median of all the differences between any two consecutive row timestamps (recommended if you are using FUSED_RESAMPLED data). This parameter impacts all the time calculations.
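To make [THRESHOLD_STATIC] and [MAXIMUM_GAP_ALLOWED] concrete, the following is an illustrative sketch (not the RAPIDS source) of how consecutive location rows could be labeled Static or Moving from their haversine speed. The column names double_latitude, double_longitude and timestamp are assumptions based on the AWARE schema.

import numpy as np
import pandas as pd

# Hedged sketch of the Static/Moving labeling controlled by the parameters above.
def label_static_moving(locations: pd.DataFrame, threshold_static_kmh: float, maximum_gap_allowed_s: float) -> pd.Series:
    # Haversine distance (km) between each pair of consecutive rows.
    lat = np.radians(locations['double_latitude'].to_numpy())
    lon = np.radians(locations['double_longitude'].to_numpy())
    a = np.sin(np.diff(lat) / 2) ** 2 + np.cos(lat[:-1]) * np.cos(lat[1:]) * np.sin(np.diff(lon) / 2) ** 2
    distance_km = 2 * 6371 * np.arcsin(np.sqrt(a))
    # Elapsed time between consecutive rows, from epoch milliseconds.
    gap_s = np.diff(locations['timestamp'].to_numpy()) / 1000
    elapsed_h = gap_s / 3600
    speed_kmh = np.divide(distance_km, elapsed_h, out=np.zeros_like(distance_km), where=elapsed_h > 0)
    # Displacements across gaps longer than MAXIMUM_GAP_ALLOWED are not trusted (kept as Static here).
    moving = (speed_kmh > threshold_static_kmh) & (gap_s <= maximum_gap_allowed_s)
    labels = np.append(moving, False)  # the last row has no following displacement; default to Static
    return pd.Series(np.where(labels, 'Moving', 'Static'), index=locations.index)

Under these assumptions, averagespeed and varspeed in the table below would be the mean and variance of speed_kmh over the rows labeled Moving.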
Features description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Feature Units Description locationvariance \\(meters^2\\) The sum of the variances of the latitude and longitude columns. loglocationvariance - Log of the sum of the variances of the latitude and longitude columns. totaldistance meters Total distance travelled in a time segment using the haversine formula. averagespeed km/hr Average speed in a time segment considering only the instances labeled as Moving. varspeed km/hr Speed variance in a time segment considering only the instances labeled as Moving. circadianmovement - \"It encodes the extent to which a person\u2019s location patterns follow a 24-hour circadian cycle.\" Doryab et al. . numberofsignificantplaces places Number of significant locations visited. It is calculated using the DBSCAN clustering algorithm which takes in EPS and MIN_SAMPLES as parameters to identify clusters. Each cluster is a significant place. numberlocationtransitions transitions Number of movements between any two clusters in a time segment. radiusgyration meters Quantifies the area covered by a participant. timeattop1location minutes Time spent at the most significant location. timeattop2location minutes Time spent at the 2 nd most significant location. timeattop3location minutes Time spent at the 3 rd most significant location. movingtostaticratio - Ratio between the number of rows labeled Moving versus Static. outlierstimepercent - Ratio of the number of rows that belong to non-significant clusters to the total number of rows in a time segment. maxlengthstayatclusters minutes Maximum time spent in a cluster (significant location). minlengthstayatclusters minutes Minimum time spent in a cluster (significant location). meanlengthstayatclusters minutes Average time spent in a cluster (significant location). stdlengthstayatclusters minutes Standard deviation of time spent in a cluster (significant location). locationentropy nats Shannon Entropy computed over the row count of each cluster (significant location); it will be higher the more rows belong to a cluster (i.e. the more time a participant spent at a significant location). normalizedlocationentropy nats Shannon Entropy computed over the row count of each cluster (significant location) divided by the number of clusters; it will be higher the more rows belong to a cluster (i.e. the more time a participant spent at a significant location). Assumptions/Observations Significant Locations Identified Significant locations are determined using DBSCAN clustering on locations that a participant visits over the course of the data collection period. The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al .","title":"Phone Locations"},{"location":"features/phone-locations/#phone-locations","text":"Sensor parameters description for [PHONE_LOCATIONS] : Key Description [TABLE] Database table where the location data is stored [LOCATIONS_TO_USE] Type of location data to use, one of ALL , GPS or FUSED_RESAMPLED . This filter is based on the provider column of the AWARE locations table, ALL includes every row, GPS only includes rows where provider is gps, and FUSED_RESAMPLED only includes rows where provider is fused after being resampled.
[FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD] if FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row will be resampled to the next valid timestamp (see the Assumptions/Observations below) only if the time difference between them is less than or equal to this threshold (in minutes). [FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION] if FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row will be resampled at most for this long (in minutes) Assumptions/Observations Types of location data to use AWARE Android and iOS clients can collect location coordinates through the phone's GPS, the network cellular towers around the phone or Google's fused location API. If you want to use only the GPS provider, set [LOCATIONS_TO_USE] to GPS ; if you want to use all providers (not recommended due to the difference in accuracy), set [LOCATIONS_TO_USE] to ALL ; if your AWARE client was configured to use fused location only or you want to focus only on this provider, set [LOCATIONS_TO_USE] to FUSED_RESAMPLED . FUSED_RESAMPLED takes the original fused location coordinates and replicates each pair forward in time as long as the phone was sensing data as indicated by the joined timestamps of [PHONE_DATA_YIELD][SENSORS] ; this is done because Google's API only logs a new location coordinate pair when it is sufficiently different in time or space from the previous one. There are two parameters associated with resampling fused location. FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD (in minutes, default 30) controls the maximum gap between any two coordinate pairs to replicate the last known pair (for example, if participant A's phone did not collect data between 10:30am and 10:50am and between 11:05am and 11:40am, the last known coordinate pair will be replicated during the first period but not the second; in other words, we assume that we can no longer guarantee the participant stayed at the last known location if the phone did not sense data for more than 30 minutes). FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION (in minutes, default 720 or 12 hours) stops the last known fused location from being replicated longer than this threshold even if the phone was sensing data continuously (for example, participant A went home at 9pm and their phone was sensing data without gaps until 11am the next morning, the last known location will only be replicated until 9am). If you have suggestions to modify or improve this resampling, let us know.","title":"Phone Locations"},{"location":"features/phone-locations/#barnett-provider","text":"These features are based on the original open-source implementation by Barnett et al and some features created by Canzian et al . Available time segments and platforms Available only for segments that start at 00:00:00 and end at 23:59:59 of the same day (daily segments) Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the BARNETT provider [FEATURES] Features to be computed, see table below [ACCURACY_LIMIT] An integer in meters, any location rows with an accuracy higher than this will be dropped.
This number means there\u2019s a 68% probability the true location is within this radius [TIMEZONE] Timezone where the location data was collected. By default points to the one defined in the Configuration [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes, the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. Features description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] adapted from Beiwe Summary Statistics : Feature Units Description hometime minutes Time at home. Time spent at home in minutes. Home is the most visited significant location between 8 pm and 8 am including any pauses within a 200-meter radius. disttravelled meters Total distance travelled over a day (flights). rog meters The Radius of Gyration (rog) is a measure in meters of the area covered by a person over a day. A centroid is calculated for all the places (pauses) visited during a day and a weighted distance between all the places and that centroid is computed. The weights are proportional to the time spent in each place. maxdiam meters The maximum diameter is the largest distance between any two pauses. maxhomedist meters The maximum distance from home in meters. siglocsvisited locations The number of significant locations visited during the day. Significant locations are computed using k-means clustering over pauses found in the whole monitoring period. The number of clusters is found iterating k from 1 to 200 stopping until the centroids of two significant locations are within 400 meters of one another. avgflightlen meters Mean length of all flights. stdflightlen meters Standard deviation of the length of all flights. avgflightdur seconds Mean duration of all flights. stdflightdur seconds The standard deviation of the duration of all flights. probpause - The fraction of a day spent in a pause (as opposed to a flight) siglocentropy nats Shannon\u2019s entropy measurement based on the proportion of time spent at each significant location visited during a day. circdnrtn - A continuous metric quantifying a person\u2019s circadian routine that can take any value between 0 and 1, where 0 represents a daily routine completely different from any other sensed days and 1 a routine the same as every other sensed day. wkenddayrtn - Same as circdnrtn but computed separately for weekends and weekdays. Assumptions/Observations Barnett's et al features These features are based on a Pause-Flight model. A pause is defined as a mobiity trace (location pings) within a certain duration and distance (by default 300 seconds and 60 meters). A flight is any mobility trace between two pauses. Data is resampled and imputed before the features are computed. See Barnett et al for more information. In RAPIDS we only expose two parameters for these features (timezone and accuracy limit). You can change other parameters in src/features/phone_locations/barnett/library/MobilityFeatures.R . Significant Locations Significant locations are determined using K-means clustering on pauses longer than 10 minutes. The number of clusters (K) is increased until no two clusters are within 400 meters from each other. After this, pauses within a certain range of a cluster (200 meters by default) will count as a visit to that significant location. 
The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al .","title":"BARNETT provider"},{"location":"features/phone-locations/#doryab-provider","text":"These features are based on the original implementation by Doryab et al. Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the DORYAB provider [FEATURES] Features to be computed, see table below [DBSCAN_EPS] The maximum distance in meters between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. [DBSCAN_MINSAMPLES] The number of samples (or total weight) in a neighborhood for a point to be considered as a core point of a cluster. This includes the point itself. [THRESHOLD_STATIC] The threshold value in km/hr used to label a row as Static or Moving. [MAXIMUM_GAP_ALLOWED] The maximum gap (in seconds) allowed between any two consecutive rows for them to be considered part of the same displacement. If this threshold is too high, it can throw speed and distance calculations off for periods when the phone was not sensing. [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. [SAMPLING_FREQUENCY] Expected time difference between any two location rows in minutes. If set to 0 , the sampling frequency will be inferred automatically as the median of all the differences between any two consecutive row timestamps (recommended if you are using FUSED_RESAMPLED data). This parameter impacts all the time calculations. Features description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Feature Units Description locationvariance \\(meters^2\\) The sum of the variances of the latitude and longitude columns. loglocationvariance - Log of the sum of the variances of the latitude and longitude columns. totaldistance meters Total distance travelled in a time segment using the haversine formula. averagespeed km/hr Average speed in a time segment considering only the instances labeled as Moving. varspeed km/hr Speed variance in a time segment considering only the instances labeled as Moving. circadianmovement - \"It encodes the extent to which a person\u2019s location patterns follow a 24-hour circadian cycle.\" Doryab et al. numberofsignificantplaces places Number of significant locations visited. It is calculated using the DBSCAN clustering algorithm which takes in EPS and MIN_SAMPLES as parameters to identify clusters.
Each cluster is a significant place. numberlocationtransitions transitions Number of movements between any two clusters in a time segment. radiusgyration meters Quantifies the area covered by a participant. timeattop1location minutes Time spent at the most significant location. timeattop2location minutes Time spent at the 2 nd most significant location. timeattop3location minutes Time spent at the 3 rd most significant location. movingtostaticratio - Ratio between the number of rows labeled Moving versus Static. outlierstimepercent - Ratio between the number of rows that belong to non-significant clusters and the total number of rows in a time segment. maxlengthstayatclusters minutes Maximum time spent in a cluster (significant location). minlengthstayatclusters minutes Minimum time spent in a cluster (significant location). meanlengthstayatclusters minutes Average time spent in a cluster (significant location). stdlengthstayatclusters minutes Standard deviation of time spent in a cluster (significant location). locationentropy nats Shannon Entropy computed over the row count of each cluster (significant location); it will be higher the more rows belong to a cluster (i.e. the more time a participant spent at a significant location). normalizedlocationentropy nats Shannon Entropy computed over the row count of each cluster (significant location) divided by the number of clusters; it will be higher the more rows belong to a cluster (i.e. the more time a participant spent at a significant location). Assumptions/Observations Significant Locations Identified Significant locations are determined using DBSCAN clustering on the locations that a participant visits over the course of the data collection period. The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al .","title":"DORYAB provider"},{"location":"features/phone-messages/","text":"Phone Messages \u00b6 Sensor parameters description for [PHONE_MESSAGES] : Key Description [TABLE] Database table where the messages data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_messages_raw.csv - data/raw/ { pid } /phone_messages_with_datetime.csv - data/interim/ { pid } /phone_messages_features/phone_messages_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_messages.csv Parameters description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_MESSAGES features from the RAPIDS provider [MESSAGES_TYPES] The messages_type that will be analyzed. The options for this parameter are received or sent . [FEATURES] Features to be computed, see table below for [MESSAGES_TYPES] received and sent Features description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Feature Units Description count messages Number of messages of type messages_type that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular messages_type during a particular time_segment . timefirstmessages minutes Number of minutes between 12:00am (midnight) and the first message of a particular messages_type during a particular time_segment . timelastmessages minutes Number of minutes between 12:00am (midnight) and the last message of a particular messages_type during a particular time_segment .
countmostfrequentcontact messages Number of messages from the contact with the most messages of messages_type during a time_segment throughout the whole dataset of each participant. Assumptions/Observations [MESSAGES_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [MESSAGES_TYPES] sent matches the [FEATURES] key sent","title":"Phone Messages"},{"location":"features/phone-messages/#phone-messages","text":"Sensor parameters description for [PHONE_MESSAGES] : Key Description [TABLE] Database table where the messages data is stored","title":"Phone Messages"},{"location":"features/phone-messages/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_messages_raw.csv - data/raw/ { pid } /phone_messages_with_datetime.csv - data/interim/ { pid } /phone_messages_features/phone_messages_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_messages.csv Parameters description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_MESSAGES features from the RAPIDS provider [MESSAGES_TYPES] The messages_type that will be analyzed. The options for this parameter are received or sent . [FEATURES] Features to be computed, see table below for [MESSAGES_TYPES] received and sent Features description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Feature Units Description count messages Number of messages of type messages_type that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular messages_type during a particular time_segment . timefirstmessages minutes Number of minutes between 12:00am (midnight) and the first message of a particular messages_type during a particular time_segment . timelastmessages minutes Number of minutes between 12:00am (midnight) and the last message of a particular messages_type during a particular time_segment . countmostfrequentcontact messages Number of messages from the contact with the most messages of messages_type during a time_segment throughout the whole dataset of each participant. Assumptions/Observations [MESSAGES_TYPES] and [FEATURES] keys in config.yaml need to match. 
For example, [MESSAGES_TYPES] sent matches the [FEATURES] key sent","title":"RAPIDS provider"},{"location":"features/phone-screen/","text":"Phone Screen \u00b6 Sensor parameters description for [PHONE_SCREEN] : Key Description [TABLE] Database table where the screen data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_screen_raw.csv - data/raw/ { pid } /phone_screen_with_datetime.csv - data/raw/ { pid } /phone_screen_with_datetime_unified.csv - data/interim/ { pid } /phone_screen_episodes.csv - data/interim/ { pid } /phone_screen_episodes_resampled.csv - data/interim/ { pid } /phone_screen_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_screen_features/phone_screen_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_screen.csv Parameters description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_SCREEN features from the RAPIDS provider [FEATURES] Features to be computed, see table below [REFERENCE_HOUR_FIRST_USE] The reference point from which firstuseafter is to be computed, default is midnight [IGNORE_EPISODES_SHORTER_THAN] Ignore episodes that are shorter than this threshold (minutes). Set to 0 to disable this filter. [IGNORE_EPISODES_LONGER_THAN] Ignore episodes that are longer than this threshold (minutes). Set to 0 to disable this filter. [EPISODE_TYPES] Currently we only support unlock episodes (from when the phone is unlocked until the screen is off) Features description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Feature Units Description sumduration minutes Total duration of all unlock episodes. maxduration minutes Longest duration of any unlock episode. minduration minutes Shortest duration of any unlock episode. avgduration minutes Average duration of all unlock episodes. stdduration minutes Standard deviation of the duration of all unlock episodes. countepisode episodes Number of all unlock episodes. firstuseafter minutes Minutes until the first unlock episode. Assumptions/Observations In Android, lock events can happen right after an off event, after a few seconds of an off event, or never happen depending on the phone's settings; therefore, an unlock episode is defined as the time between an unlock and an off event. In iOS, on and off events do not exist, so an unlock episode is defined as the time between an unlock and a lock event. Events in iOS are recorded reliably albeit with some duplicated lock events within milliseconds of each other, so we only keep consecutive unlock/lock pairs. In Android you can find multiple consecutive unlock or lock events, so we only keep consecutive unlock/off pairs. In our experiments these cases are less than 10% of the screen events collected; this happens because ACTION_SCREEN_OFF and ACTION_SCREEN_ON are sent when the device becomes non-interactive which may have nothing to do with the screen turning off . In addition to unlock/off episodes, in Android it is possible to measure the time spent on the lock screen before an unlock event as well as the total screen time (i.e. ON to OFF ) but these are not implemented at the moment. We transform iOS screen events to match Android\u2019s format by replacing lock episodes with off episodes (2 with 0) in iOS.
However, as mentioned above, this is still computing unlock to lock episodes.","title":"Phone Screen"},{"location":"features/phone-screen/#phone-screen","text":"Sensor parameters description for [PHONE_SCREEN] : Key Description [TABLE] Database table where the screen data is stored","title":"Phone Screen"},{"location":"features/phone-screen/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_screen_raw.csv - data/raw/ { pid } /phone_screen_with_datetime.csv - data/raw/ { pid } /phone_screen_with_datetime_unified.csv - data/interim/ { pid } /phone_screen_episodes.csv - data/interim/ { pid } /phone_screen_episodes_resampled.csv - data/interim/ { pid } /phone_screen_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_screen_features/phone_screen_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_screen.csv Parameters description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_SCREEN features from the RAPIDS provider [FEATURES] Features to be computed, see table below [REFERENCE_HOUR_FIRST_USE] The reference point from which firstuseafter is to be computed, default is midnight [IGNORE_EPISODES_SHORTER_THAN] Ignore episodes that are shorter than this threshold (minutes). Set to 0 to disable this filter. [IGNORE_EPISODES_LONGER_THAN] Ignore episodes that are longer than this threshold (minutes). Set to 0 to disable this filter. [EPISODE_TYPES] Currently we only support unlock episodes (from when the phone is unlocked until the screen is off) Features description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Feature Units Description sumduration minutes Total duration of all unlock episodes. maxduration minutes Longest duration of any unlock episode. minduration minutes Shortest duration of any unlock episode. avgduration minutes Average duration of all unlock episodes. stdduration minutes Standard deviation of the duration of all unlock episodes. countepisode episodes Number of all unlock episodes. firstuseafter minutes Minutes until the first unlock episode. Assumptions/Observations In Android, lock events can happen right after an off event, after a few seconds of an off event, or never happen depending on the phone's settings; therefore, an unlock episode is defined as the time between an unlock and an off event. In iOS, on and off events do not exist, so an unlock episode is defined as the time between an unlock and a lock event. Events in iOS are recorded reliably albeit with some duplicated lock events within milliseconds of each other, so we only keep consecutive unlock/lock pairs. In Android you can find multiple consecutive unlock or lock events, so we only keep consecutive unlock/off pairs. In our experiments these cases are less than 10% of the screen events collected; this happens because ACTION_SCREEN_OFF and ACTION_SCREEN_ON are sent when the device becomes non-interactive which may have nothing to do with the screen turning off . In addition to unlock/off episodes, in Android it is possible to measure the time spent on the lock screen before an unlock event as well as the total screen time (i.e. ON to OFF ) but these are not implemented at the moment. We transform iOS screen events to match Android\u2019s format by replacing lock episodes with off episodes (2 with 0) in iOS. However, as mentioned above, this is still computing unlock to lock episodes.
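Illustrative sketch The unlock-episode features above boil down to a few pandas aggregations. This is only a sketch: the episodes frame and its columns (local_segment, start, end) are hypothetical stand-ins for RAPIDS' interim episode files, and firstuseafter is measured from [REFERENCE_HOUR_FIRST_USE] (midnight here).

import pandas as pd

episodes = pd.DataFrame({
    "local_segment": ["daily#2020-05-01", "daily#2020-05-01", "daily#2020-05-02"],
    "start": pd.to_datetime(["2020-05-01 07:10", "2020-05-01 13:00", "2020-05-02 09:30"]),
    "end": pd.to_datetime(["2020-05-01 07:25", "2020-05-01 13:40", "2020-05-02 09:35"]),
})
episodes["duration"] = (episodes["end"] - episodes["start"]).dt.total_seconds() / 60  # minutes

reference_hour = 0  # [REFERENCE_HOUR_FIRST_USE], midnight by default
features = episodes.groupby("local_segment").agg(
    sumduration=("duration", "sum"),
    maxduration=("duration", "max"),
    minduration=("duration", "min"),
    avgduration=("duration", "mean"),
    stdduration=("duration", "std"),
    countepisode=("duration", "count"),
    # minutes between the reference hour and the first unlock of the segment
    firstuseafter=("start", lambda s: (s.min() - s.min().normalize()).total_seconds() / 60 - reference_hour * 60),
)
print(features)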
","title":"RAPIDS provider"},{"location":"features/phone-wifi-connected/","text":"Phone WiFi Connected \u00b6 Sensor parameters description for [PHONE_WIFI_CONNECTED] : Key Description [TABLE] Database table where the wifi (connected) data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_wifi_connected_raw.csv - data/raw/ { pid } /phone_wifi_connected_with_datetime.csv - data/interim/ { pid } /phone_wifi_connected_features/phone_wifi_connected_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_connected.csv Parameters description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_CONNECTED features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points connected during a time_segment; an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access points during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A connected WiFi access point is one that a phone was connected to. By default AWARE stores this data in the sensor_wifi table.","title":"Phone WiFi Connected"},{"location":"features/phone-wifi-connected/#phone-wifi-connected","text":"Sensor parameters description for [PHONE_WIFI_CONNECTED] : Key Description [TABLE] Database table where the wifi (connected) data is stored","title":"Phone WiFi Connected"},{"location":"features/phone-wifi-connected/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_wifi_connected_raw.csv - data/raw/ { pid } /phone_wifi_connected_with_datetime.csv - data/interim/ { pid } /phone_wifi_connected_features/phone_wifi_connected_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_connected.csv Parameters description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_CONNECTED features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points connected during a time_segment; an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access points during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A connected WiFi access point is one that a phone was connected to. By default AWARE stores this data in the sensor_wifi table.
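Illustrative sketch A minimal pandas version of the three features above; the scans frame and its columns (local_segment, bssid) are illustrative, not RAPIDS' raw table schema. Note that the most scanned access point is chosen across the whole monitoring period and then counted within each time segment.

import pandas as pd

scans = pd.DataFrame({
    "local_segment": ["daily#2020-05-01"] * 4 + ["daily#2020-05-02"] * 3,
    "bssid": ["aa:01", "aa:01", "bb:02", "cc:03", "aa:01", "bb:02", "bb:02"],
})

most_scanned = scans["bssid"].value_counts().idxmax()  # across the whole monitoring period
features = scans.groupby("local_segment").agg(
    countscans=("bssid", "size"),
    uniquedevices=("bssid", "nunique"),
    countscansmostuniquedevice=("bssid", lambda s: int((s == most_scanned).sum())),
)
print(features)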
","title":"RAPIDS provider"},{"location":"features/phone-wifi-visible/","text":"Phone WiFi Visible \u00b6 Sensor parameters description for [PHONE_WIFI_VISIBLE] : Key Description [TABLE] Database table where the wifi (visible) data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_wifi_visible_raw.csv - data/raw/ { pid } /phone_wifi_visible_with_datetime.csv - data/interim/ { pid } /phone_wifi_visible_features/phone_wifi_visible_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_visible.csv Parameters description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_VISIBLE features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points visible during a time_segment; an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access points during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A visible WiFi access point is one that a phone sensed around itself but that it was not connected to. Due to API restrictions, this sensor is not available on iOS.
By default AWARE stores this data in the wifi table.","title":"RAPIDS provider"},{"location":"setup/configuration/","text":"Configuration \u00b6 You need to follow these steps to configure your RAPIDS deployment before you can extract behavioral features Add your database credentials Choose the timezone of your study Create your participants files Select what time segments you want to extract features on Modify your device data source configuration Select what sensors and features you want to process When you are done with this configuration, go to executing RAPIDS . Hint Every time you see config[\"KEY\"] or [KEY] in these docs we are referring to the corresponding key in the config.yaml file. Database credentials \u00b6 Create an empty file called .env in your RAPIDS root directory Add the following lines and replace your database-specific credentials (user, password, host, and database): [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE Warning The label MY_GROUP is arbitrary but it has to match the following config.yaml key: DATABASE_GROUP : &database_group MY_GROUP Note You can ignore this step if you are only processing Fitbit data in CSV files. Timezone of your study \u00b6 Single timezone \u00b6 If your study only happened in a single time zone, select the appropriate code form this list and change the following config key. Double check your timezone code pick, for example US Eastern Time is America/New_York not EST TIMEZONE : &timezone America/New_York Multiple timezones \u00b6 Support coming soon. Participant files \u00b6 Participant files link together multiple devices (smartphones and wearables) to specific participants and identify them throughout RAPIDS. You can create these files manually or automatically . Participant files are stored in data/external/participant_files/pxx.yaml and follow a unified structure . Note The list PIDS in config.yaml needs to have the participant file names of the people you want to process. For example, if you created p01.yaml , p02.yaml and p03.yaml files in /data/external/participant_files/ , then PIDS should be: PIDS : [ p01 , p02 , p03 ] Tip Attribute values of the [PHONE] and [FITBIT] sections in every participant file are optional which allows you to analyze data from participants that only carried smartphones, only Fitbit devices, or both. Optional: Migrating participants files with the old format If you were using the pre-release version of RAPIDS with participant files in plain text (as opposed to yaml), you can run the following command and your old files will be converted into yaml files stored in data/external/participant_files/ python tools/update_format_participant_files.py Structure of participants files \u00b6 Example of the structure of a participant file In this example, the participant used an android phone, an ios phone, and a fitbit device throughout the study between Apr 23 rd 2020 and Oct 28 th 2020 PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 , dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ] PLATFORMS : [ android , ios ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 FITBIT : DEVICE_IDS : [ fitbit1 ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 For [PHONE] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each smartphone, you can have more than one for when participants changed phones in the middle of the study, in this case, data from all their devices will be joined and relabeled with the last 1 on this list. 
[PLATFORMS] An array that specifies the OS of each smartphone in [DEVICE_IDS] ; use a combination of android or ios (we support participants that changed platforms in the middle of your study!). If you have an aware_device table in your database you can set [PLATFORMS]: [multiple] and RAPIDS will infer them automatically. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD . Only data collected after this date will be included in the analysis [END_DATE] A string with format YYYY-MM-DD . Only data collected before this date will be included in the analysis For [FITBIT] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each Fitbit; you can have more than one in case the participant changed devices in the middle of the study; in this case, data from all devices will be joined and relabeled with the last device_id on this list. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD . Only data collected after this date will be included in the analysis [END_DATE] A string with format YYYY-MM-DD . Only data collected before this date will be included in the analysis Automatic creation of participant files \u00b6 You have two options: a) use the aware_device table in your database or b) use a CSV file. In either case, in your config.yaml , set [PHONE_SECTION][ADD] or [FITBIT_SECTION][ADD] to TRUE depending on what devices you used in your study. Set [DEVICE_ID_COLUMN] to the name of the column that uniquely identifies each device and include any device ids you want to ignore in [IGNORED_DEVICE_IDS] . aware_device table Set the following keys in your config.yaml CREATE_PARTICIPANT_FILES : SOURCE : TYPE : AWARE_DEVICE_TABLE DATABASE_GROUP : *database_group CSV_FILE_PATH : \"\" TIMEZONE : *timezone PHONE_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : device_id # column name IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : fitbit_id # column name IGNORED_DEVICE_IDS : [] Then run snakemake -j1 create_participants_files CSV file Set the following keys in your config.yaml . CREATE_PARTICIPANT_FILES : SOURCE : TYPE : CSV_FILE DATABASE_GROUP : \"\" CSV_FILE_PATH : \"your_path/to_your.csv\" TIMEZONE : *timezone PHONE_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : device_id # column name IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : fitbit_id # column name IGNORED_DEVICE_IDS : [] Your CSV file ( [SOURCE][CSV_FILE_PATH] ) should have the following columns but you can omit any values you don\u2019t have on each column: Column Description phone device id The name of this column has to match [PHONE_SECTION][DEVICE_ID_COLUMN] . Separate multiple ids with ; fitbit device id The name of this column has to match [FITBIT_SECTION][DEVICE_ID_COLUMN] . Separate multiple ids with ; pid Unique identifiers with the format pXXX (your participant files will be named with this string) platform Use android , ios or multiple as explained above; separate values with ; label A human readable string that is used in reports and visualizations. start_date A string with format YYYY-MM-DD . end_date A string with format YYYY-MM-DD . Example device_id,pid,label,platform,start_date,end_date,fitbit_id a748ee1a-1d0b-4ae9-9074-279a2b6ba524;dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43,p01,julio,android;ios,2020-01-01,2021-01-01,fitbit1 4c4cf7a1-0340-44bc-be0f-d5053bf7390c,p02,meng,ios,2021-01-01,2022-01-01,fitbit2 Then run snakemake -j1 create_participants_files
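Illustrative sketch The mapping the automatic step performs can be pictured with a short script that turns each CSV row into a pxx.yaml file following the participant-file structure shown earlier. This is only an illustration, not the create_participants_files rule itself; it assumes the default column names (device_id, fitbit_id), a hypothetical participants.csv path, and it skips the handling of omitted values and [IGNORED_DEVICE_IDS].

import csv
import pathlib
import yaml  # PyYAML

out_dir = pathlib.Path("data/external/participant_files")
out_dir.mkdir(parents=True, exist_ok=True)

with open("participants.csv") as f:  # hypothetical file laid out like the example above
    for row in csv.DictReader(f):
        participant = {
            "PHONE": {
                "DEVICE_IDS": row["device_id"].split(";"),
                "PLATFORMS": row["platform"].split(";"),
                "LABEL": row["label"],
                "START_DATE": row["start_date"],
                "END_DATE": row["end_date"],
            },
            "FITBIT": {
                "DEVICE_IDS": row["fitbit_id"].split(";"),
                "LABEL": row["label"],
                "START_DATE": row["start_date"],
                "END_DATE": row["end_date"],
            },
        }
        (out_dir / f"{row['pid']}.yaml").write_text(yaml.dump(participant))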
Time Segments \u00b6 Time segments (or epochs) are the time windows on which you want to extract behavioral features. For example, you might want to process data on every day, every morning, or only during weekends. RAPIDS offers three categories of time segments that are flexible enough to cover most use cases: frequency (short time windows every day), periodic (arbitrary time windows on any day), and event (arbitrary time windows around events of interest). See also our examples . Frequency Segments These segments are computed on every day and all have the same duration (for example 30 minutes). Set the following keys in your config.yaml TIME_SEGMENTS : &time_segments TYPE : FREQUENCY FILE : \"data/external/your_frequency_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE The file pointed to by [TIME_SEGMENTS][FILE] should have the following format and can only have 1 row. Column Description label A string that is used as a prefix in the name of your time segments length An integer representing the duration of your time segments in minutes Example label,length thirtyminutes,30 This configuration will compute 48 time segments for every day when any data from any participant was sensed. For example: start_time,length,label 00:00,30,thirtyminutes0000 00:30,30,thirtyminutes0001 01:00,30,thirtyminutes0002 01:30,30,thirtyminutes0003 ... Periodic Segments These segments can be computed every day, or on specific days of the week, month, quarter, and year. Their minimum duration is 1 minute but they can be as long as you want. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : PERIODIC FILE : \"data/external/your_periodic_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE If [INCLUDE_PAST_PERIODIC_SEGMENTS] is set to TRUE , RAPIDS will consider instances of your segments far enough back in the past to include the first row of data of each participant. For example, if the first row of data from a participant happened on Saturday March 7 th 2020 and the requested segment duration is 7 days starting on every Sunday, the first segment to be considered would start on Sunday March 1 st if [INCLUDE_PAST_PERIODIC_SEGMENTS] is TRUE or on Sunday March 8 th if FALSE . The file pointed to by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. It has to be unique between rows start_time A string with format HH:MM:SS representing the starting time of this segment on any day length A string representing the length of this segment. It can have one or more of the following strings XXD XXH XXM XXS to represent days, hours, minutes and seconds. For example 7D 23H 59M 59S repeats_on One of the following options every_day , wday , qday , mday , and yday . The last four represent a week, quarter, month, and year day repeats_value An integer complementing repeats_on .
If you set repeats_on to every_day , set this to 0 ; otherwise 1-7 represent a wday starting from Monday, 1-31 represent a mday , 1-91 represent a qday , and 1-366 represent a yday Example label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 morning,06:00:00,5H 59M 59S,every_day,0 afternoon,12:00:00,5H 59M 59S,every_day,0 evening,18:00:00,5H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 This configuration will create five segment instances ( daily , morning , afternoon , evening , night ) on any given day ( every_day set to 0). The daily segment will start at midnight and will last 23:59:59 ; the other four segments will start at 6am, 12pm, 6pm, and 12am respectively and last for 05:59:59 . Event segments These segments can be computed before or after an event of interest (defined as any UNIX timestamp). Their minimum duration is 1 minute but they can be as long as you want. The start of each segment can be shifted backwards or forwards from the specified timestamp. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : EVENT FILE : \"data/external/your_event_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE The file pointed to by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. If labels are unique, every segment is independent; if two or more segments have the same label, their data will be grouped when computing auxiliary data for features like the most frequent contact for calls (the most frequent contact will be computed across all these segments). There cannot be two overlapping event segments with the same label (RAPIDS will throw an error) event_timestamp A UNIX timestamp that represents the moment an event of interest happened (clinical relapse, survey, readmission, etc.). The corresponding time segment will be computed around this moment using length , shift , and shift_direction length A string representing the length of this segment. It can have one or more of the following keys XXD XXH XXM XXS to represent a number of days, hours, minutes, and seconds. For example 7D 23H 59M 59S shift A string representing the time shift from event_timestamp . It can have one or more of the following keys XXD XXH XXM XXS to represent a number of days, hours, minutes and seconds. For example 7D 23H 59M 59S . Use this value to change the start of a segment with respect to its event_timestamp . For example, set this variable to 1H to create a segment that starts 1 hour from an event of interest ( shift_direction determines if it\u2019s before or after). shift_direction An integer representing whether the shift is before ( -1 ) or after ( 1 ) an event_timestamp device_id The device id (smartphone or fitbit) to which this segment belongs. You have to create a line in this event segment file for each event of a participant that you want to analyse.
If you have participants with multiple device ids you can choose any of them Example label,event_timestamp,length,shift,shift_direction,device_id stress1,1587661220000,1H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress2,1587747620000,4H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress3,1587906020000,3H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress4,1584291600000,7H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress5,1588172420000,9H,5M,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587661220000,1H,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587747620000,1D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587906020000,7D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 This example will create eight segments for a single participant ( a748ee1a... ), five independent stressX segments with various lengths (1,4,3,7, and 9 hours). Segments stress1 , stress3 , and stress5 are shifted forwards by 5 minutes and stress2 and stress4 are shifted backwards by 4 hours (that is, if the stress4 event happened on March 15 th at 1pm EST ( 1584291600000 ), the time segment will start on that day at 9am and end at 4pm). The three mood segments are 1 hour, 1 day and 7 days long and have no shift. In addition, these mood segments are grouped together, meaning that although RAPIDS will compute features on each one of them, some necessary information to compute a few of such features will be extracted from all three segments, for example the phone contact that called a participant the most or the location clusters visited by a participant. Segment Examples \u00b6 5-minutes Use the following Frequency segment file to create 288 (12 * 60 * 24) 5-minute segments starting from midnight of every day in your study label,length fiveminutes,5 Daily Use the following Periodic segment file to create daily segments starting from midnight of every day in your study label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 Morning Use the following Periodic segment file to create morning segments starting at 06:00:00 and ending at 11:59:59 of every day in your study label,start_time,length,repeats_on,repeats_value morning,06:00:00,5H 59M 59S,every_day,0 Overnight Use the following Periodic segment file to create overnight segments starting at 20:00:00 and ending at 07:59:59 (next day) of every day in your study label,start_time,length,repeats_on,repeats_value morning,20:00:00,11H 59M 59S,every_day,0 Weekly Use the following Periodic segment file to create non-overlapping weekly segments starting at midnight of every Monday in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,wday,1 Use the following Periodic segment file to create overlapping weekly segments starting at midnight of every day in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,every_day,0 Week-ends Use the following Periodic segment file to create week-end segments starting at midnight of every Saturday in your study label,start_time,length,repeats_on,repeats_value weekend,00:00:00,1D 23H 59M 59S,wday,6 Around surveys Use the following Event segment file to create two 2-hour segments that start 1 hour before surveys answered by 3 participants label,event_timestamp,length,shift,shift_direction,device_id survey1,1587661220000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey2,1587747620000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey1,1587906020000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr 
survey2,1584291600000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey1,1588172420000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 survey2,1584291600000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 Device Data Source Configuration \u00b6 You might need to modify the following config keys in your config.yaml depending on what devices your participants used and where you are storing your data. You can ignore [PHONE_DATA_CONFIGURATION] or [FITBIT_DATA_CONFIGURATION] if you are not working with either device. Phone The relevant config.yaml section looks like this by default: PHONE_DATA_CONFIGURATION : SOURCE : TYPE : DATABASE DATABASE_GROUP : *database_group DEVICE_ID_COLUMN : device_id # column name TIMEZONE : TYPE : SINGLE # SINGLE (MULTIPLE support coming soon) VALUE : *timezone Parameters for [PHONE_DATA_CONFIGURATION] Key Description [SOURCE] [TYPE] Only DATABASE is supported (phone data will be pulled from a database) [SOURCE] [DATABASE_GROUP] *database_group points to the value defined before in Database credentials [SOURCE] [DEVICE_ID_COLUMN] A column that contains strings that uniquely identify smartphones. For data collected with AWARE this is usually device_id [TIMEZONE] [TYPE] Only SINGLE is supported for now [TIMEZONE] [VALUE] *timezone points to the value defined before in Timezone of your study Fitbit The relevant config.yaml section looks like this by default: FITBIT_DATA_CONFIGURATION : SOURCE : TYPE : DATABASE # DATABASE or FILES (set each [FITBIT_SENSOR][TABLE] attribute with a table name or a file path accordingly) COLUMN_FORMAT : JSON # JSON or PLAIN_TEXT DATABASE_GROUP : *database_group DEVICE_ID_COLUMN : device_id # column name TIMEZONE : TYPE : SINGLE # Fitbit devices don't support time zones so we read this data in the timezone indicated by VALUE VALUE : *timezone Parameters for [FITBIT_DATA_CONFIGURATION] Key Description [SOURCE] [TYPE] DATABASE or FILES (set each [FITBIT_SENSOR] [TABLE] attribute accordingly with a table name or a file path) [SOURCE] [COLUMN_FORMAT] JSON or PLAIN_TEXT . Column format of the source data. If you pulled your data directly from the Fitbit API the column containing the sensor data will be in JSON format [SOURCE] [DATABASE_GROUP] *database_group points to the value defined before in Database credentials . Only used if [TYPE] is DATABASE . [SOURCE] [DEVICE_ID_COLUMN] A column that contains strings that uniquely identify Fitbit devices. [TIMEZONE] [TYPE] Only SINGLE is supported (Fitbit devices always store data in local time). [TIMEZONE] [VALUE] *timezone points to the value defined before in Timezone of your study Sensor and Features to Process \u00b6 Finally, you need to modify the config.yaml section of the sensors you want to extract behavioral features from. All sensors follow the same naming convention ( DEVICE_SENSOR ) and parameter structure which we explain in the Behavioral Features Introduction . Done Head over to Execution to learn how to execute RAPIDS.","title":"Configuration"},{"location":"setup/configuration/#configuration","text":"You need to follow these steps to configure your RAPIDS deployment before you can extract behavioral features Add your database credentials Choose the timezone of your study Create your participants files Select what time segments you want to extract features on Modify your device data source configuration Select what sensors and features you want to process When you are done with this configuration, go to executing RAPIDS .
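Illustrative sketch Once config.yaml is edited, a quick way to double check which providers will run is to list every sensor whose COMPUTE flag is True. This sketch only assumes the SENSOR > PROVIDERS > PROVIDER > COMPUTE layout described in these docs; it is not part of RAPIDS.

import yaml  # PyYAML

with open("config.yaml") as f:
    config = yaml.safe_load(f)

for sensor, section in config.items():
    providers = section.get("PROVIDERS") if isinstance(section, dict) else None
    if not isinstance(providers, dict):
        continue
    for provider, params in providers.items():
        if isinstance(params, dict) and params.get("COMPUTE"):
            print(f"{sensor} -> {provider}: {params.get('FEATURES', [])}")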
Hint Every time you see config[\"KEY\"] or [KEY] in these docs we are referring to the corresponding key in the config.yaml file.","title":"Configuration"},{"location":"setup/configuration/#database-credentials","text":"Create an empty file called .env in your RAPIDS root directory Add the following lines and replace your database-specific credentials (user, password, host, and database): [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE Warning The label MY_GROUP is arbitrary but it has to match the following config.yaml key: DATABASE_GROUP : &database_group MY_GROUP Note You can ignore this step if you are only processing Fitbit data in CSV files.","title":"Database credentials"},{"location":"setup/configuration/#timezone-of-your-study","text":"","title":"Timezone of your study"},{"location":"setup/configuration/#single-timezone","text":"If your study only happened in a single time zone, select the appropriate code form this list and change the following config key. Double check your timezone code pick, for example US Eastern Time is America/New_York not EST TIMEZONE : &timezone America/New_York","title":"Single timezone"},{"location":"setup/configuration/#multiple-timezones","text":"Support coming soon.","title":"Multiple timezones"},{"location":"setup/configuration/#participant-files","text":"Participant files link together multiple devices (smartphones and wearables) to specific participants and identify them throughout RAPIDS. You can create these files manually or automatically . Participant files are stored in data/external/participant_files/pxx.yaml and follow a unified structure . Note The list PIDS in config.yaml needs to have the participant file names of the people you want to process. For example, if you created p01.yaml , p02.yaml and p03.yaml files in /data/external/participant_files/ , then PIDS should be: PIDS : [ p01 , p02 , p03 ] Tip Attribute values of the [PHONE] and [FITBIT] sections in every participant file are optional which allows you to analyze data from participants that only carried smartphones, only Fitbit devices, or both. Optional: Migrating participants files with the old format If you were using the pre-release version of RAPIDS with participant files in plain text (as opposed to yaml), you can run the following command and your old files will be converted into yaml files stored in data/external/participant_files/ python tools/update_format_participant_files.py","title":"Participant files"},{"location":"setup/configuration/#structure-of-participants-files","text":"Example of the structure of a participant file In this example, the participant used an android phone, an ios phone, and a fitbit device throughout the study between Apr 23 rd 2020 and Oct 28 th 2020 PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 , dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ] PLATFORMS : [ android , ios ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 FITBIT : DEVICE_IDS : [ fitbit1 ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 For [PHONE] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each smartphone, you can have more than one for when participants changed phones in the middle of the study, in this case, data from all their devices will be joined and relabeled with the last 1 on this list. 
[PLATFORMS] An array that specifies the OS of each smartphone in [DEVICE_IDS] , use a combination of android or ios (we support participants that changed platforms in the middle of your study!). If you have an aware_device table in your database you can set [PLATFORMS]: [multiple] and RAPIDS will infer them automatically. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYY-MM-DD . Only data collected after this date will be included in the analysis [END_DATE] A string with format YYY-MM-DD . Only data collected before this date will be included in the analysis For [FITBIT] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each Fitbit, you can have more than one in case the participant changed devices in the middle of the study, in this case, data from all devices will be joined and relabeled with the last device_id on this list. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYY-MM-DD . Only data collected after this date will be included in the analysis [END_DATE] A string with format YYY-MM-DD . Only data collected before this date will be included in the analysis","title":"Structure of participants files"},{"location":"setup/configuration/#automatic-creation-of-participant-files","text":"You have two options a) use the aware_device table in your database or b) use a CSV file. In either case, in your config.yaml , set [PHONE_SECTION][ADD] or [FITBIT_SECTION][ADD] to TRUE depending on what devices you used in your study. Set [DEVICE_ID_COLUMN] to the name of the column that uniquely identifies each device and include any device ids you want to ignore in [IGNORED_DEVICE_IDS] . aware_device table Set the following keys in your config.yaml CREATE_PARTICIPANT_FILES : SOURCE : TYPE : AWARE_DEVICE_TABLE DATABASE_GROUP : *database_group CSV_FILE_PATH : \"\" TIMEZONE : *timezone PHONE_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : device_id # column name IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : fitbit_id # column name IGNORED_DEVICE_IDS : [] Then run snakemake -j1 create_participants_files CSV file Set the following keys in your config.yaml . CREATE_PARTICIPANT_FILES : SOURCE : TYPE : CSV_FILE DATABASE_GROUP : \"\" CSV_FILE_PATH : \"your_path/to_your.csv\" TIMEZONE : *timezone PHONE_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : device_id # column name IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE DEVICE_ID_COLUMN : fitbit_id # column name IGNORED_DEVICE_IDS : [] Your CSV file ( [SOURCE][CSV_FILE_PATH] ) should have the following columns but you can omit any values you don\u2019t have on each column: Column Description phone device id The name of this column has to match [PHONE_SECTION][DEVICE_ID_COLUMN] . Separate multiple ids with ; fitbit device id The name of this column has to match [FITBIT_SECTION][DEVICE_ID_COLUMN] . Separate multiple ids with ; pid Unique identifiers with the format pXXX (your participant files will be named with this string platform Use android , ios or multiple as explained above, separate values with ; label A human readable string that is used in reports and visualizations. start_date A string with format YYY-MM-DD . end_date A string with format YYY-MM-DD . 
Example device_id,pid,label,platform,start_date,end_date,fitbit_id a748ee1a-1d0b-4ae9-9074-279a2b6ba524;dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43,p01,julio,android;ios,2020-01-01,2021-01-01,fitbit1 4c4cf7a1-0340-44bc-be0f-d5053bf7390c,p02,meng,ios,2021-01-01,2022-01-01,fitbit2 Then run snakemake -j1 create_participants_files","title":"Automatic creation of participant files"},{"location":"setup/configuration/#time-segments","text":"Time segments (or epochs) are the time windows on which you want to extract behavioral features. For example, you might want to process data on every day, every morning, or only during weekends. RAPIDS offers three categories of time segments that are flexible enough to cover most use cases: frequency (short time windows every day), periodic (arbitrary time windows on any day), and event (arbitrary time windows around events of interest). See also our examples . Frequency Segments These segments are computed on every day and all have the same duration (for example 30 minutes). Set the following keys in your config.yaml TIME_SEGMENTS : &time_segments TYPE : FREQUENCY FILE : \"data/external/your_frequency_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can only have 1 row. Column Description label A string that is used as a prefix in the name of your time segments length An integer representing the duration of your time segments in minutes Example label,length thirtyminutes,30 This configuration will compute 48 time segments for every day when any data from any participant was sensed. For example: start_time,length,label 00:00,30,thirtyminutes0000 00:30,30,thirtyminutes0001 01:00,30,thirtyminutes0002 01:30,30,thirtyminutes0003 ... Periodic Segments These segments can be computed every day, or on specific days of the week, month, quarter, and year. Their minimum duration is 1 minute but they can be as long as you want. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : PERIODIC FILE : \"data/external/your_periodic_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE If [INCLUDE_PAST_PERIODIC_SEGMENTS] is set to TRUE , RAPIDS will consider instances of your segments back enough in the past as to include the first row of data of each participant. For example, if the first row of data from a participant happened on Saturday March 7 th 2020 and the requested segment duration is 7 days starting on every Sunday, the first segment to be considered would start on Sunday March 1 st if [INCLUDE_PAST_PERIODIC_SEGMENTS] is TRUE or on Sunday March 8 th if FALSE . The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. It has to be unique between rows start_time A string with format HH:MM:SS representing the starting time of this segment on any day length A string representing the length of this segment.It can have one or more of the following strings XXD XXH XXM XXS to represent days, hours, minutes and seconds. For example 7D 23H 59M 59S repeats_on One of the follow options every_day , wday , qday , mday , and yday . The last four represent a week, quarter, month and year day repeats_value An integer complementing repeats_on . 
If you set repeats_on to every_day set this to 0 , otherwise 1-7 represent a wday starting from Mondays, 1-31 represent a mday , 1-91 represent a qday , and 1-366 represent a yday Example label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 morning,06:00:00,5H 59M 59S,every_day,0 afternoon,12:00:00,5H 59M 59S,every_day,0 evening,18:00:00,5H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 This configuration will create five segments instances ( daily , morning , afternoon , evening , night ) on any given day ( every_day set to 0). The daily segment will start at midnight and will last 23:59:59 , the other four segments will start at 6am, 12pm, 6pm, and 12am respectively and last for 05:59:59 . Event segments These segments can be computed before or after an event of interest (defined as any UNIX timestamp). Their minimum duration is 1 minute but they can be as long as you want. The start of each segment can be shifted backwards or forwards from the specified timestamp. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : EVENT FILE : \"data/external/your_event_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. If labels are unique, every segment is independent; if two or more segments have the same label, their data will be grouped when computing auxiliary data for features like the most frequent contact for calls (the most frequent contact will be computed across all these segments). There cannot be two overlaping event segments with the same label (RAPIDS will throw an error) event_timestamp A UNIX timestamp that represents the moment an event of interest happened (clinical relapse, survey, readmission, etc.). The corresponding time segment will be computed around this moment using length , shift , and shift_direction length A string representing the length of this segment. It can have one or more of the following keys XXD XXH XXM XXS to represent a number of days, hours, minutes, and seconds. For example 7D 23H 59M 59S shift A string representing the time shift from event_timestamp . It can have one or more of the following keys XXD XXH XXM XXS to represent a number of days, hours, minutes and seconds. For example 7D 23H 59M 59S . Use this value to change the start of a segment with respect to its event_timestamp . For example, set this variable to 1H to create a segment that starts 1 hour from an event of interest ( shift_direction determines if it\u2019s before or after). shift_direction An integer representing whether the shift is before ( -1 ) or after ( 1 ) an event_timestamp device_id The device id (smartphone or fitbit) to whom this segment belongs to. You have to create a line in this event segment file for each event of a participant that you want to analyse. 
If you have participants with multiple device ids you can choose any of them Example label,event_timestamp,length,shift,shift_direction,device_id stress1,1587661220000,1H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress2,1587747620000,4H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress3,1587906020000,3H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress4,1584291600000,7H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress5,1588172420000,9H,5M,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587661220000,1H,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587747620000,1D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587906020000,7D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 This example will create eight segments for a single participant ( a748ee1a... ), five independent stressX segments with various lengths (1,4,3,7, and 9 hours). Segments stress1 , stress3 , and stress5 are shifted forwards by 5 minutes and stress2 and stress4 are shifted backwards by 4 hours (that is, if the stress4 event happened on March 15 th at 1pm EST ( 1584291600000 ), the time segment will start on that day at 9am and end at 4pm). The three mood segments are 1 hour, 1 day and 7 days long and have no shift. In addition, these mood segments are grouped together, meaning that although RAPIDS will compute features on each one of them, some necessary information to compute a few of such features will be extracted from all three segments, for example the phone contact that called a participant the most or the location clusters visited by a participant.","title":"Time Segments"},{"location":"setup/configuration/#segment-examples","text":"5-minutes Use the following Frequency segment file to create 288 (12 * 60 * 24) 5-minute segments starting from midnight of every day in your study label,length fiveminutes,5 Daily Use the following Periodic segment file to create daily segments starting from midnight of every day in your study label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 Morning Use the following Periodic segment file to create morning segments starting at 06:00:00 and ending at 11:59:59 of every day in your study label,start_time,length,repeats_on,repeats_value morning,06:00:00,5H 59M 59S,every_day,0 Overnight Use the following Periodic segment file to create overnight segments starting at 20:00:00 and ending at 07:59:59 (next day) of every day in your study label,start_time,length,repeats_on,repeats_value morning,20:00:00,11H 59M 59S,every_day,0 Weekly Use the following Periodic segment file to create non-overlapping weekly segments starting at midnight of every Monday in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,wday,1 Use the following Periodic segment file to create overlapping weekly segments starting at midnight of every day in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,every_day,0 Week-ends Use the following Periodic segment file to create week-end segments starting at midnight of every Saturday in your study label,start_time,length,repeats_on,repeats_value weekend,00:00:00,1D 23H 59M 59S,wday,6 Around surveys Use the following Event segment file to create two 2-hour segments that start 1 hour before surveys answered by 3 participants label,event_timestamp,length,shift,shift_direction,device_id survey1,1587661220000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey2,1587747620000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 
survey1,1587906020000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey2,1584291600000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey1,1588172420000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 survey2,1584291600000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3","title":"Segment Examples"},{"location":"setup/configuration/#device-data-source-configuration","text":"You might need to modify the following config keys in your config.yaml depending on what devices your participants used and where you are storing your data. You can ignore [PHONE_DATA_CONFIGURATION] or [FITBIT_DATA_CONFIGURATION] if you are not working with either devices. Phone The relevant config.yaml section looks like this by default: PHONE_DATA_CONFIGURATION : SOURCE : TYPE : DATABASE DATABASE_GROUP : *database_group DEVICE_ID_COLUMN : device_id # column name TIMEZONE : TYPE : SINGLE # SINGLE (MULTIPLE support coming soon) VALUE : *timezone Parameters for [PHONE_DATA_CONFIGURATION] Key Description [SOURCE] [TYPE] Only DATABASE is supported (phone data will be pulled from a database) [SOURCE] [DATABASE_GROUP] *database_group points to the value defined before in Database credentials [SOURCE] [DEVICE_ID_COLUMN] A column that contains strings that uniquely identify smartphones. For data collected with AWARE this is usually device_id [TIMEZONE] [TYPE] Only SINGLE is supported for now [TIMEZONE] [VALUE] *timezone points to the value defined before in Timezone of your study Fitbit The relevant config.yaml section looks like this by default: FITBIT_DATA_CONFIGURATION : SOURCE : TYPE : DATABASE # DATABASE or FILES (set each [FITBIT_SENSOR][TABLE] attribute with a table name or a file path accordingly) COLUMN_FORMAT : JSON # JSON or PLAIN_TEXT DATABASE_GROUP : *database_group DEVICE_ID_COLUMN : device_id # column name TIMEZONE : TYPE : SINGLE # Fitbit devices don't support time zones so we read this data in the timezone indicated by VALUE VALUE : *timezone Parameters for For [FITBIT_DATA_CONFIGURATION] Key Description [SOURCE] [TYPE] DATABASE or FILES (set each [FITBIT_SENSOR] [TABLE] attribute accordingly with a table name or a file path) [SOURCE] [COLUMN_FORMAT] JSON or PLAIN_TEXT . Column format of the source data. If you pulled your data directly from the Fitbit API the column containing the sensor data will be in JSON format [SOURCE] [DATABASE_GROUP] *database_group points to the value defined before in Database credentials . Only used if [TYPE] is DATABASE . [SOURCE] [DEVICE_ID_COLUMN] A column that contains strings that uniquely identify Fitbit devices. [TIMEZONE] [TYPE] Only SINGLE is supported (Fitbit devices always store data in local time). [TIMEZONE] [VALUE] *timezone points to the value defined before in Timezone of your study","title":"Device Data Source Configuration"},{"location":"setup/configuration/#sensor-and-features-to-process","text":"Finally, you need to modify the config.yaml section of the sensors you want to extract behavioral features from. All sensors follow the same naming nomenclature ( DEVICE_SENSOR ) and parameter structure which we explain in the Behavioral Features Introduction . Done Head over to Execution to learn how to execute RAPIDS.","title":"Sensor and Features to Process"},{"location":"setup/execution/","text":"Execution \u00b6 After you have installed and configured RAPIDS, use the following command to execute it. 
./rapids -j1 Ready to extract behavioral features If you are ready to extract features head over to the Behavioral Features Introduction Info The script ./rapids is a wrapper around Snakemake so you can pass any parameters that Snakemake accepts (e.g. -j1 ). Updating RAPIDS output after modifying config.yaml Any changes to the config.yaml file will be applied automatically and only the relevant files will be updated. This means that after modifying the features list for PHONE_MESSAGE for example, RAPIDS will update the output file with the correct features. Multi-core You can run RAPIDS over multiple cores by modifying the -j argument (e.g. use -j8 to use 8 cores). However , take into account that this means multiple sensor datasets for different participants will be load in memory at the same time. If RAPIDS crashes because it ran out of memory reduce the number of cores and try again. As reference, we have run RAPIDS over 12 cores and 32 Gb of RAM without problems for a study with 200 participants with 14 days of low-frequency smartphone data (no accelerometer, gyroscope, or magnetometer). Forcing a complete rerun If you want to update your data from your database or rerun the whole pipeline from scratch run one or both of the following commands depending on the devices you are using: ./rapids -j1 -R download_phone_data ./rapids -j1 -R download_fitbit_data Deleting RAPIDS output If you want to delete all the output files RAPIDS produces you can execute the following command: ./rapids -j1 --delete-all-output","title":"Execution"},{"location":"setup/execution/#execution","text":"After you have installed and configured RAPIDS, use the following command to execute it. ./rapids -j1 Ready to extract behavioral features If you are ready to extract features head over to the Behavioral Features Introduction Info The script ./rapids is a wrapper around Snakemake so you can pass any parameters that Snakemake accepts (e.g. -j1 ). Updating RAPIDS output after modifying config.yaml Any changes to the config.yaml file will be applied automatically and only the relevant files will be updated. This means that after modifying the features list for PHONE_MESSAGE for example, RAPIDS will update the output file with the correct features. Multi-core You can run RAPIDS over multiple cores by modifying the -j argument (e.g. use -j8 to use 8 cores). However , take into account that this means multiple sensor datasets for different participants will be load in memory at the same time. If RAPIDS crashes because it ran out of memory reduce the number of cores and try again. As reference, we have run RAPIDS over 12 cores and 32 Gb of RAM without problems for a study with 200 participants with 14 days of low-frequency smartphone data (no accelerometer, gyroscope, or magnetometer). 
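A rough way to pick a -j value is to bound it both by the number of CPU cores and by how much memory each concurrently processed participant needs. The snippet below is only a back-of-the-envelope illustration (it is not part of RAPIDS), and the 2 GB per participant figure is a made-up placeholder you would replace with a measurement from your own study.

```python
import os

def suggest_jobs(total_ram_gb: float, ram_per_participant_gb: float = 2.0) -> int:
    """Heuristic: do not run more parallel jobs than cores, or than memory allows."""
    allowed_by_memory = max(1, int(total_ram_gb // ram_per_participant_gb))
    return min(os.cpu_count() or 1, allowed_by_memory)

print(f"./rapids -j{suggest_jobs(total_ram_gb=32)}")
```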
Forcing a complete rerun If you want to update your data from your database or rerun the whole pipeline from scratch run one or both of the following commands depending on the devices you are using: ./rapids -j1 -R download_phone_data ./rapids -j1 -R download_fitbit_data Deleting RAPIDS output If you want to delete all the output files RAPIDS produces you can execute the following command: ./rapids -j1 --delete-all-output","title":"Execution"},{"location":"setup/installation/","text":"Installation \u00b6 You can install RAPIDS using Docker (the fastest), or native instructions for MacOS and Ubuntu Docker Install Docker Pull our RAPIDS container docker pull agamk/rapids:latest ` Run RAPIDS' container (after this step is done you should see a prompt in the main RAPIDS folder with its python environment active) docker run -it agamk/rapids:latest Pull the latest version of RAPIDS git pull Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Optional . You can edit RAPIDS files with vim but we recommend using Visual Studio Code and its Remote Containers extension How to configure Remote Containers extension Make sure RAPIDS container is running Install the Remote - Containers extension Go to the Remote Explorer panel on the left hand sidebar On the top right dropdown menu choose Containers Double click on the agamk/rapids container in the CONTAINERS tree A new VS Code session should open on RAPIDS main folder insidethe container. MacOS We tested these instructions in Catalina Install brew Install MySQL brew install mysql brew services start mysql Install R 4.0, pandoc and rmarkdown. If you have other instances of R, we recommend uninstalling them brew install r brew install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install miniconda (restart your terminal afterwards) brew cask install miniconda conda init zsh # (or conda init bash) Clone our repo git clone https://github.com/carissalow/rapids Create a python virtual environment cd rapids conda env create -f environment.yml -n rapids conda activate rapids Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Ubuntu We tested on Ubuntu 18.04 & 20.04 Install dependencies sudo apt install libcurl4-openssl-dev sudo apt install libssl-dev sudo apt install libxml2-dev Install MySQL sudo apt install libmysqlclient-dev sudo apt install mysql-server Add key for R\u2019s repository. sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 Add R\u2019s repository For 18.04 sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/' For 20.04 sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' Install R 4.0. 
If you have other instances of R, we recommend uninstalling them sudo apt update sudo apt install r-base Install Pandoc and rmarkdown sudo apt install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install git sudo apt install git Install miniconda Restart your current shell Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1","title":"Installation"},{"location":"setup/installation/#installation","text":"You can install RAPIDS using Docker (the fastest), or native instructions for MacOS and Ubuntu Docker Install Docker Pull our RAPIDS container docker pull agamk/rapids:latest ` Run RAPIDS' container (after this step is done you should see a prompt in the main RAPIDS folder with its python environment active) docker run -it agamk/rapids:latest Pull the latest version of RAPIDS git pull Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Optional . You can edit RAPIDS files with vim but we recommend using Visual Studio Code and its Remote Containers extension How to configure Remote Containers extension Make sure RAPIDS container is running Install the Remote - Containers extension Go to the Remote Explorer panel on the left hand sidebar On the top right dropdown menu choose Containers Double click on the agamk/rapids container in the CONTAINERS tree A new VS Code session should open on RAPIDS main folder insidethe container. MacOS We tested these instructions in Catalina Install brew Install MySQL brew install mysql brew services start mysql Install R 4.0, pandoc and rmarkdown. If you have other instances of R, we recommend uninstalling them brew install r brew install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install miniconda (restart your terminal afterwards) brew cask install miniconda conda init zsh # (or conda init bash) Clone our repo git clone https://github.com/carissalow/rapids Create a python virtual environment cd rapids conda env create -f environment.yml -n rapids conda activate rapids Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Ubuntu We tested on Ubuntu 18.04 & 20.04 Install dependencies sudo apt install libcurl4-openssl-dev sudo apt install libssl-dev sudo apt install libxml2-dev Install MySQL sudo apt install libmysqlclient-dev sudo apt install mysql-server Add key for R\u2019s repository. 
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 Add R\u2019s repository For 18.04 sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/' For 20.04 sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' Install R 4.0. If you have other instances of R, we recommend uninstalling them sudo apt update sudo apt install r-base Install Pandoc and rmarkdown sudo apt install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install git sudo apt install git Install miniconda Restart your current shell Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1","title":"Installation"},{"location":"visualizations/data-quality-visualizations/","text":"Data Quality Visualizations \u00b6 We showcase these visualizations with a test study that collected 14 days of smartphone and Fitbit data from two participants (t01 and t02) and extracted behavioral features within five time segments (daily, morning, afternoon, evening, and night). Note Time segments (e.g. daily , morning , etc.) can have multiple instances (day 1, day 2, or morning 1, morning 2, etc.) 1. Histograms of phone data yield \u00b6 RAPIDS provides two histograms that show the number of time segment instances that had a certain ratio of valid yielded minutes and hours , respectively. A valid yielded minute has at least 1 row of data from any smartphone sensor and a valid yielded hour contains at least M valid minutes. These plots can be used as a rough indication of the smartphone monitoring coverage during a study aggregated across all participants. For example, the figure below shows a valid yielded minutes histogram for daily segments and we can infer that the monitoring coverage was very good since almost all segments contain at least 90 to 100% of the expected sensed minutes. Example Click here to see an example of these interactive visualizations in HTML format Histogram of the data yielded minute ratio for a single participant during five time segments (daily, afternoon, evening, and night) 2. Heatmaps of overall data yield \u00b6 These heatmaps are a break down per time segment and per participant of Visualization 1 . Heatmap\u2019s rows represent participants, columns represent time segment instances and the cells\u2019 color represent the valid yielded minute or hour ratio for a participant during a time segment instance. As different participants might join a study on different dates and time segments can be of any length and start on any day, the x-axis is labelled with the time delta between the start of each time segment instance minus the start of the first instance. These plots provide a quick study overview of the monitoring coverage per person and per time segment. 
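As a rough illustration of what these ratios mean, the sketch below computes the valid yielded minutes ratio for one time segment instance from a list of sensor row timestamps, following the definition above (a minute is valid if at least one row from any phone sensor falls within it). It uses made-up timestamps and is not the RAPIDS implementation.

```python
from datetime import datetime, timedelta

def valid_yielded_minutes_ratio(row_times, segment_start, segment_end):
    """Fraction of minutes in [segment_start, segment_end) that contain at least one sensor row."""
    total_minutes = int((segment_end - segment_start).total_seconds() // 60)
    covered_minutes = {
        int((t - segment_start).total_seconds() // 60)
        for t in row_times
        if segment_start <= t < segment_end
    }
    return len(covered_minutes) / total_minutes if total_minutes else 0.0

segment_start = datetime(2020, 4, 23)
segment_end = segment_start + timedelta(days=1)
rows = [segment_start + timedelta(minutes=5 * i) for i in range(200)]  # hypothetical sensor rows
print(round(valid_yielded_minutes_ratio(rows, segment_start, segment_end), 3))  # 200 / 1440 = 0.139
```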
The figure below shows the heatmap of the valid yielded minute ratio for participants t01 and t02 on daily segments and, as we inferred from the previous histogram, the lighter (yellow) color on most time segment instances (cells) indicate both phones sensed data without interruptions for most days (except for the first and last ones). Example Click here to see an example of these interactive visualizations in HTML format Overall compliance heatmap for all participants 3. Heatmap of recorded phone sensors \u00b6 In these heatmaps rows represent time segment instances, columns represent minutes since the start of a time segment instance, and cells\u2019 color shows the number of phone sensors that logged at least one row of data during those 1-minute windows. RAPIDS creates a plot per participant and per time segment and can be used as a rough indication of whether time-based sensors were following their sensing schedule (e.g. if location was being sensed every 2 minutes). The figure below shows this heatmap for phone sensors collected by participant t01 in daily time segments from Apr 23 rd 2020 to May 4 th 2020. We can infer that for most of the monitoring time, the participant\u2019s phone logged data from at least 8 sensors each minute. Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the recorded phone sensors per minute and per time segment of a single participant 4. Heatmap of sensor row count \u00b6 These heatmaps are a per-sensor breakdown of Visualization 1 and Visualization 2 . Note that the second row (ratio of valid yielded minutes) of this heatmap matches the respective participant (bottom) row the screenshot in Visualization 2. In these heatmaps rows represent phone or Fitbit sensors, columns represent time segment instances and cell\u2019s color shows the normalized (0 to 1) row count of each sensor within a time segment instance. RAPIDS creates one heatmap per participant and they can be used to judge missing data on a per participant and per sensor basis. The figure below shows data for 16 phone sensors (including data yield) of t01\u2019s daily segments (only half of the sensor names and dates are visible in the screenshot but all can be accessed in the interactive plot). From the top two rows, we can see that the phone was sensing data for most of the monitoring period (as suggested by Figure 3 and Figure 4). We can also infer how phone usage influenced the different sensor streams; there are peaks of screen events during the first day (Apr 23 rd ), peaks of location coordinates on Apr 26 th and Apr 30 th , and no sent or received SMS except for Apr 23 rd , Apr 29 th and Apr 30 th (unlabeled row between screen and locations). Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the sensor row count per time segment of a single participant","title":"Data Quality"},{"location":"visualizations/data-quality-visualizations/#data-quality-visualizations","text":"We showcase these visualizations with a test study that collected 14 days of smartphone and Fitbit data from two participants (t01 and t02) and extracted behavioral features within five time segments (daily, morning, afternoon, evening, and night). Note Time segments (e.g. daily , morning , etc.) 
can have multiple instances (day 1, day 2, or morning 1, morning 2, etc.)","title":"Data Quality Visualizations"},{"location":"visualizations/data-quality-visualizations/#1-histograms-of-phone-data-yield","text":"RAPIDS provides two histograms that show the number of time segment instances that had a certain ratio of valid yielded minutes and hours , respectively. A valid yielded minute has at least 1 row of data from any smartphone sensor and a valid yielded hour contains at least M valid minutes. These plots can be used as a rough indication of the smartphone monitoring coverage during a study aggregated across all participants. For example, the figure below shows a valid yielded minutes histogram for daily segments and we can infer that the monitoring coverage was very good since almost all segments contain at least 90 to 100% of the expected sensed minutes. Example Click here to see an example of these interactive visualizations in HTML format Histogram of the data yielded minute ratio for a single participant during five time segments (daily, afternoon, evening, and night)","title":"1. Histograms of phone data yield"},{"location":"visualizations/data-quality-visualizations/#2-heatmaps-of-overall-data-yield","text":"These heatmaps are a break down per time segment and per participant of Visualization 1 . Heatmap\u2019s rows represent participants, columns represent time segment instances and the cells\u2019 color represent the valid yielded minute or hour ratio for a participant during a time segment instance. As different participants might join a study on different dates and time segments can be of any length and start on any day, the x-axis is labelled with the time delta between the start of each time segment instance minus the start of the first instance. These plots provide a quick study overview of the monitoring coverage per person and per time segment. The figure below shows the heatmap of the valid yielded minute ratio for participants t01 and t02 on daily segments and, as we inferred from the previous histogram, the lighter (yellow) color on most time segment instances (cells) indicate both phones sensed data without interruptions for most days (except for the first and last ones). Example Click here to see an example of these interactive visualizations in HTML format Overall compliance heatmap for all participants","title":"2. Heatmaps of overall data yield"},{"location":"visualizations/data-quality-visualizations/#3-heatmap-of-recorded-phone-sensors","text":"In these heatmaps rows represent time segment instances, columns represent minutes since the start of a time segment instance, and cells\u2019 color shows the number of phone sensors that logged at least one row of data during those 1-minute windows. RAPIDS creates a plot per participant and per time segment and can be used as a rough indication of whether time-based sensors were following their sensing schedule (e.g. if location was being sensed every 2 minutes). The figure below shows this heatmap for phone sensors collected by participant t01 in daily time segments from Apr 23 rd 2020 to May 4 th 2020. We can infer that for most of the monitoring time, the participant\u2019s phone logged data from at least 8 sensors each minute. Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the recorded phone sensors per minute and per time segment of a single participant","title":"3. 
Heatmap of recorded phone sensors"},{"location":"visualizations/data-quality-visualizations/#4-heatmap-of-sensor-row-count","text":"These heatmaps are a per-sensor breakdown of Visualization 1 and Visualization 2 . Note that the second row (ratio of valid yielded minutes) of this heatmap matches the respective participant (bottom) row the screenshot in Visualization 2. In these heatmaps rows represent phone or Fitbit sensors, columns represent time segment instances and cell\u2019s color shows the normalized (0 to 1) row count of each sensor within a time segment instance. RAPIDS creates one heatmap per participant and they can be used to judge missing data on a per participant and per sensor basis. The figure below shows data for 16 phone sensors (including data yield) of t01\u2019s daily segments (only half of the sensor names and dates are visible in the screenshot but all can be accessed in the interactive plot). From the top two rows, we can see that the phone was sensing data for most of the monitoring period (as suggested by Figure 3 and Figure 4). We can also infer how phone usage influenced the different sensor streams; there are peaks of screen events during the first day (Apr 23 rd ), peaks of location coordinates on Apr 26 th and Apr 30 th , and no sent or received SMS except for Apr 23 rd , Apr 29 th and Apr 30 th (unlabeled row between screen and locations). Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the sensor row count per time segment of a single participant","title":"4. Heatmap of sensor row count"},{"location":"visualizations/feature-visualizations/","text":"Feature Visualizations \u00b6 1. Heatmap Correlation Matrix \u00b6 Columns and rows are the behavioral features computed in RAPIDS, cells\u2019 color represents the correlation coefficient between all days of data for every pair of features of all participants. The user can specify a minimum number of observations ( time segment instances) required to compute the correlation between two features using the MIN_ROWS_RATIO parameter (0.5 by default) and the correlation method (Pearson, Spearman or Kendall) with the CORR_METHOD parameter. In addition, this plot can be configured to only display correlation coefficients above a threshold using the CORR_THRESHOLD parameter (0.1 by default). Example Click here to see an example of these interactive visualizations in HTML format Correlation matrix heatmap for all the features of all participants","title":"Features"},{"location":"visualizations/feature-visualizations/#feature-visualizations","text":"","title":"Feature Visualizations"},{"location":"visualizations/feature-visualizations/#1-heatmap-correlation-matrix","text":"Columns and rows are the behavioral features computed in RAPIDS, cells\u2019 color represents the correlation coefficient between all days of data for every pair of features of all participants. The user can specify a minimum number of observations ( time segment instances) required to compute the correlation between two features using the MIN_ROWS_RATIO parameter (0.5 by default) and the correlation method (Pearson, Spearman or Kendall) with the CORR_METHOD parameter. In addition, this plot can be configured to only display correlation coefficients above a threshold using the CORR_THRESHOLD parameter (0.1 by default). Example Click here to see an example of these interactive visualizations in HTML format Correlation matrix heatmap for all the features of all participants","title":"1. 
Heatmap Correlation Matrix"},{"location":"workflow-examples/analysis/","text":"Analysis Workflow Example \u00b6 TL;DR In addition to using RAPIDS to extract behavioral features and create plots, you can structure your data analysis within RAPIDS (i.e. cleaning your features and creating ML/statistical models) We include an analysis example in RAPIDS that covers raw data processing, cleaning, feature extraction, machine learning modeling, and evaluation Use this example as a guide to structure your own analysis within RAPIDS RAPIDS analysis workflows are compatible with your favorite data science tools and libraries RAPIDS analysis workflows are reproducible and we encourage you to publish them along with your research papers Why should I integrate my analysis in RAPIDS? \u00b6 Even though the bulk of RAPIDS current functionality is related to the computation of behavioral features, we recommend RAPIDS as a complementary tool to create a mobile data analysis workflow. This is because the cookiecutter data science file organization guidelines, the use of Snakemake, the provided behavioral features, and the reproducible R and Python development environments allow researchers to divide an analysis workflow into small parts that can be audited, shared in an online repository, reproduced in other computers, and understood by other people as they follow a familiar and consistent structure. We believe these advantages outweigh the time needed to learn how to create these workflows in RAPIDS. We clarify that to create analysis workflows in RAPIDS, researchers can still use any data manipulation tools, editors, libraries or languages they are already familiar with. RAPIDS is meant to be the final destination of analysis code that was developed in interactive notebooks or stand-alone scripts. For example, a user can compute call and location features using RAPIDS, then, they can use Jupyter notebooks to explore feature cleaning approaches and once the cleaning code is final, it can be moved to RAPIDS as a new step in the pipeline. In turn, the output of this cleaning step can be used to explore machine learning models and once a model is finished, it can also be transferred to RAPIDS as a step of its own. The idea is that when it is time to publish a piece of research, a RAPIDS workflow can be shared in a public repository as is. In the following sections we share an example of how we structured an analysis workflow in RAPIDS. Analysis workflow structure \u00b6 To accurately reflect the complexity of a real-world modeling scenario, we decided not to oversimplify this example. Importantly, every step in this example follows a basic structure: an input file and parameters are manipulated by an R or Python script that saves the results to an output file. Input files, parameters, output files and scripts are grouped into Snakemake rules that are described on smk files in the rules folder (we point the reader to the relevant rule(s) of each step). Researchers can use these rules and scripts as a guide to create their own as it is expected every modeling project will have different requirements, data and goals but ultimately most follow a similar chainned pattern. Hint The example\u2019s config file is example_profile/example_config.yaml and its Snakefile is in example_profile/Snakefile . The config file is already configured to process the sensor data as explained in Analysis workflow modules . 
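The sketch below illustrates the input file, parameters, script, output file pattern described above with a hypothetical feature-merging step written in Python. The column names and file names are invented for the example; in an actual workflow a Snakemake rule would pass its input, params, and output values to a script like this (for instance through the snakemake object that Snakemake exposes to script: directives).

```python
import pandas as pd

def merge_sensor_features(input_csvs, output_csv):
    """One pipeline step: read input files, combine them, and save a single output file."""
    merged = None
    for path in input_csvs:
        sensor = pd.read_csv(path)
        merged = sensor if merged is None else merged.merge(sensor, on="local_date", how="outer")
    merged.to_csv(output_csv, index=False)

# Two tiny fabricated inputs so the sketch runs on its own; a rule would supply real per-sensor files.
pd.DataFrame({"local_date": ["2020-04-23"], "calls_count": [3]}).to_csv("calls.csv", index=False)
pd.DataFrame({"local_date": ["2020-04-23"], "screen_unlocks": [12]}).to_csv("screen.csv", index=False)
merge_sensor_features(["calls.csv", "screen.csv"], "merged_features.csv")
```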
Description of the study modeled in our analysis workflow example \u00b6 Our example is based on a hypothetical study that recruited 2 participants that underwent surgery and collected mobile data for at least one week before and one week after the procedure. Participants wore a Fitbit device and installed the AWARE client in their personal Android and iOS smartphones to collect mobile data 24/7. In addition, participants completed daily severity ratings of 12 common symptoms on a scale from 0 to 10 that we summed up into a daily symptom burden score. The goal of this workflow is to find out if we can predict the daily symptom burden score of a participant. Thus, we framed this question as a binary classification problem with two classes, high and low symptom burden based on the scores above and below average of each participant. We also want to compare the performance of individual (personalized) models vs a population model. In total, our example workflow has nine steps that are in charge of sensor data preprocessing, feature extraction, feature cleaning, machine learning model training and model evaluation (see figure below). We ship this workflow with RAPIDS and share a database with test data in an Open Science Framework repository. Modules of RAPIDS example workflow, from raw data to model evaluation Configure and run the analysis workflow example \u00b6 Install RAPIDS Configure the user credentials of a local or remote MySQL server with writing permissions in your .env file. The example config file is at example_profile/example_config.yaml . Unzip the test database to data/external/rapids_example.sql and run: ./rapids -j1 restore_sql_file --profile example_profile Create the participant files for this example by running: ./rapids -j1 create_example_participant_files Run the example pipeline with: ./rapids -j1 --profile example_profile Modules of our analysis workflow example \u00b6 1. Feature extraction We extract daily behavioral features for data yield, received and sent messages, missed, incoming and outgoing calls, resample fused location data using Doryab provider, activity recognition, battery, Bluetooth, screen, light, applications foreground, conversations, Wi-Fi connected, Wi-Fi visible, Fitbit heart rate summary and intraday data, Fitbit sleep summary data, and Fitbit step summary and intraday data without excluding sleep periods with an active bout threshold of 10 steps. In total, we obtained 237 daily sensor features over 12 days per participant. 2. Extract demographic data. It is common to have demographic data in addition to mobile and target (ground truth) data. In this example we include participants\u2019 age, gender and the number of days they spent in hospital after their surgery as features in our model. We extract these three columns from the participant_info table of our test database . As these three features remain the same within participants, they are used only on the population model. Refer to the demographic_features rule in rules/models.smk . 3. Create target labels. The two classes for our machine learning binary classification problem are high and low symptom burden. Target values are already stored in the participant_target table of our test database and transferred to a CSV file. A new rule/script can be created if further manipulation is necessary. Refer to the parse_targets rule in rules/models.smk . 4. Feature merging. 
These daily features are stored on a CSV file per sensor, a CSV file per participant, and a CSV file including all features from all participants (in every case each column represents a feature and each row represents a day). Refer to the merge_sensor_features_for_individual_participants and merge_features_for_population_model rules in rules/features.smk . 5. Data visualization. At this point the user can use the five plots RAPIDS provides (or implement new ones) to explore and understand the quality of the raw data and extracted features and decide what sensors, days, or participants to include and exclude. Refer to rules/reports.smk to find the rules that generate these plots. 6. Feature cleaning. In this stage we perform four steps to clean our sensor feature file. First, we discard days with a data yield hour ratio less than or equal to 0.75, i.e. we include days with at least 18 hours of data. Second, we drop columns (features) with more than 30% of missing rows. Third, we drop columns with zero variance. Fourth, we drop rows (days) with more than 30% of missing columns (features). In this cleaning stage several parameters are created and exposed in example_profile/example_config.yaml . After this step, we kept 162 features over 11 days for the individual model of p01, 107 features over 12 days for the individual model of p02 and 101 features over 20 days for the population model. Note that the difference in the number of features between p01 and p02 is mostly due to iOS restrictions that stops researchers from collecting the same number of sensors than in Android phones. Feature cleaning for the individual models is done in the clean_sensor_features_for_individual_participants rule and for the population model in the clean_sensor_features_for_all_participants rule in rules/models.smk . 7. Merge features and targets. In this step we merge the cleaned features and target labels for our individual models in the merge_features_and_targets_for_individual_model rule in rules/models.smk . Additionally, we merge the cleaned features, target labels, and demographic features of our two participants for the population model in the merge_features_and_targets_for_population_model rule in rules/models.smk . These two merged files are the input for our individual and population models. 8. Modelling. This stage has three phases: model building, training and evaluation. In the building phase we impute, normalize and oversample our dataset. Missing numeric values in each column are imputed with their mean and we impute missing categorical values with their mode. We normalize each numeric column with one of three strategies (min-max, z-score, and scikit-learn package\u2019s robust scaler) and we one-hot encode each categorial feature as a numerical array. We oversample our imbalanced dataset using SMOTE (Synthetic Minority Over-sampling Technique) or a Random Over sampler from scikit-learn. All these parameters are exposed in example_profile/example_config.yaml . In the training phase, we create eight models: logistic regression, k-nearest neighbors, support vector machine, decision tree, random forest, gradient boosting classifier, extreme gradient boosting classifier and a light gradient boosting machine. We cross-validate each model with an inner cycle to tune hyper-parameters based on the Macro F1 score and an outer cycle to predict the test set on a model with the best hyper-parameters. Both cross-validation cycles use a leave-one-out strategy. 
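As an illustration of this nested cross-validation setup, the snippet below tunes one classifier with an inner leave-one-out cycle scored on Macro F1 and evaluates it with an outer leave-one-out cycle, using scikit-learn on random stand-in data. It is not the code in rules/models.smk and it shows only one of the eight classifiers.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, LeaveOneOut, cross_val_score

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 5))         # stand-in for cleaned daily feature rows
y = rng.integers(0, 2, size=20)      # stand-in for high/low symptom burden labels

# Inner cycle: tune hyper-parameters with leave-one-out splits, scored on Macro F1
# (single-sample test folds may raise harmless UndefinedMetricWarning messages)
inner = GridSearchCV(
    LogisticRegression(max_iter=1000),
    param_grid={"C": [0.1, 1.0, 10.0]},
    scoring="f1_macro",
    cv=LeaveOneOut(),
)
# Outer cycle: evaluate the tuned model on each held-out day
outer_scores = cross_val_score(inner, X, y, cv=LeaveOneOut(), scoring="accuracy")
print(outer_scores.mean())
```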
Parameters for each model like weights and learning rates are exposed in example_profile/example_config.yaml . Finally, in the evaluation phase we compute the accuracy, Macro F1, kappa, area under the curve and per class precision, recall and F1 score of all folds of the outer cross-validation cycle. Refer to the modelling_for_individual_participants rule for the individual modeling and to the modelling_for_all_participants rule for the population modeling, both in rules/models.smk . 9. Compute model baselines. We create three baselines to evaluate our classification models. First, a majority classifier that labels each test sample with the majority class of our training data. Second, a random weighted classifier that predicts each test observation sampling at random from a binomial distribution based on the ratio of our target labels. Third, a decision tree classifier based solely on the demographic features of each participant. As we do not have demographic features for individual model, this baseline is only available for population model. Our baseline metrics (e.g. accuracy, precision, etc.) are saved into a CSV file, ready to be compared to our modeling results. Refer to the baselines_for_individual_model rule for the individual model baselines and to the baselines_for_population_model rule for population model baselines, both in rules/models.smk .","title":"Analysis"},{"location":"workflow-examples/analysis/#analysis-workflow-example","text":"TL;DR In addition to using RAPIDS to extract behavioral features and create plots, you can structure your data analysis within RAPIDS (i.e. cleaning your features and creating ML/statistical models) We include an analysis example in RAPIDS that covers raw data processing, cleaning, feature extraction, machine learning modeling, and evaluation Use this example as a guide to structure your own analysis within RAPIDS RAPIDS analysis workflows are compatible with your favorite data science tools and libraries RAPIDS analysis workflows are reproducible and we encourage you to publish them along with your research papers","title":"Analysis Workflow Example"},{"location":"workflow-examples/analysis/#why-should-i-integrate-my-analysis-in-rapids","text":"Even though the bulk of RAPIDS current functionality is related to the computation of behavioral features, we recommend RAPIDS as a complementary tool to create a mobile data analysis workflow. This is because the cookiecutter data science file organization guidelines, the use of Snakemake, the provided behavioral features, and the reproducible R and Python development environments allow researchers to divide an analysis workflow into small parts that can be audited, shared in an online repository, reproduced in other computers, and understood by other people as they follow a familiar and consistent structure. We believe these advantages outweigh the time needed to learn how to create these workflows in RAPIDS. We clarify that to create analysis workflows in RAPIDS, researchers can still use any data manipulation tools, editors, libraries or languages they are already familiar with. RAPIDS is meant to be the final destination of analysis code that was developed in interactive notebooks or stand-alone scripts. For example, a user can compute call and location features using RAPIDS, then, they can use Jupyter notebooks to explore feature cleaning approaches and once the cleaning code is final, it can be moved to RAPIDS as a new step in the pipeline. 
In turn, the output of this cleaning step can be used to explore machine learning models and once a model is finished, it can also be transferred to RAPIDS as a step of its own. The idea is that when it is time to publish a piece of research, a RAPIDS workflow can be shared in a public repository as is. In the following sections we share an example of how we structured an analysis workflow in RAPIDS.","title":"Why should I integrate my analysis in RAPIDS?"},{"location":"workflow-examples/analysis/#analysis-workflow-structure","text":"To accurately reflect the complexity of a real-world modeling scenario, we decided not to oversimplify this example. Importantly, every step in this example follows a basic structure: an input file and parameters are manipulated by an R or Python script that saves the results to an output file. Input files, parameters, output files and scripts are grouped into Snakemake rules that are described on smk files in the rules folder (we point the reader to the relevant rule(s) of each step). Researchers can use these rules and scripts as a guide to create their own as it is expected every modeling project will have different requirements, data and goals but ultimately most follow a similar chainned pattern. Hint The example\u2019s config file is example_profile/example_config.yaml and its Snakefile is in example_profile/Snakefile . The config file is already configured to process the sensor data as explained in Analysis workflow modules .","title":"Analysis workflow structure"},{"location":"workflow-examples/analysis/#description-of-the-study-modeled-in-our-analysis-workflow-example","text":"Our example is based on a hypothetical study that recruited 2 participants that underwent surgery and collected mobile data for at least one week before and one week after the procedure. Participants wore a Fitbit device and installed the AWARE client in their personal Android and iOS smartphones to collect mobile data 24/7. In addition, participants completed daily severity ratings of 12 common symptoms on a scale from 0 to 10 that we summed up into a daily symptom burden score. The goal of this workflow is to find out if we can predict the daily symptom burden score of a participant. Thus, we framed this question as a binary classification problem with two classes, high and low symptom burden based on the scores above and below average of each participant. We also want to compare the performance of individual (personalized) models vs a population model. In total, our example workflow has nine steps that are in charge of sensor data preprocessing, feature extraction, feature cleaning, machine learning model training and model evaluation (see figure below). We ship this workflow with RAPIDS and share a database with test data in an Open Science Framework repository. Modules of RAPIDS example workflow, from raw data to model evaluation","title":"Description of the study modeled in our analysis workflow example"},{"location":"workflow-examples/analysis/#configure-and-run-the-analysis-workflow-example","text":"Install RAPIDS Configure the user credentials of a local or remote MySQL server with writing permissions in your .env file. The example config file is at example_profile/example_config.yaml . 
Unzip the test database to data/external/rapids_example.sql and run: ./rapids -j1 restore_sql_file --profile example_profile Create the participant files for this example by running: ./rapids -j1 create_example_participant_files Run the example pipeline with: ./rapids -j1 --profile example_profile","title":"Configure and run the analysis workflow example"},{"location":"workflow-examples/analysis/#modules-of-our-analysis-workflow-example","text":"1. Feature extraction We extract daily behavioral features for data yield, received and sent messages, missed, incoming and outgoing calls, resample fused location data using Doryab provider, activity recognition, battery, Bluetooth, screen, light, applications foreground, conversations, Wi-Fi connected, Wi-Fi visible, Fitbit heart rate summary and intraday data, Fitbit sleep summary data, and Fitbit step summary and intraday data without excluding sleep periods with an active bout threshold of 10 steps. In total, we obtained 237 daily sensor features over 12 days per participant. 2. Extract demographic data. It is common to have demographic data in addition to mobile and target (ground truth) data. In this example we include participants\u2019 age, gender and the number of days they spent in hospital after their surgery as features in our model. We extract these three columns from the participant_info table of our test database . As these three features remain the same within participants, they are used only on the population model. Refer to the demographic_features rule in rules/models.smk . 3. Create target labels. The two classes for our machine learning binary classification problem are high and low symptom burden. Target values are already stored in the participant_target table of our test database and transferred to a CSV file. A new rule/script can be created if further manipulation is necessary. Refer to the parse_targets rule in rules/models.smk . 4. Feature merging. These daily features are stored on a CSV file per sensor, a CSV file per participant, and a CSV file including all features from all participants (in every case each column represents a feature and each row represents a day). Refer to the merge_sensor_features_for_individual_participants and merge_features_for_population_model rules in rules/features.smk . 5. Data visualization. At this point the user can use the five plots RAPIDS provides (or implement new ones) to explore and understand the quality of the raw data and extracted features and decide what sensors, days, or participants to include and exclude. Refer to rules/reports.smk to find the rules that generate these plots. 6. Feature cleaning. In this stage we perform four steps to clean our sensor feature file. First, we discard days with a data yield hour ratio less than or equal to 0.75, i.e. we include days with at least 18 hours of data. Second, we drop columns (features) with more than 30% of missing rows. Third, we drop columns with zero variance. Fourth, we drop rows (days) with more than 30% of missing columns (features). In this cleaning stage several parameters are created and exposed in example_profile/example_config.yaml . After this step, we kept 162 features over 11 days for the individual model of p01, 107 features over 12 days for the individual model of p02 and 101 features over 20 days for the population model. 
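The four cleaning steps map naturally onto a few pandas operations. The sketch below is a simplified stand-in for the actual cleaning rules (the real thresholds and column names are defined in example_profile/example_config.yaml and the cleaning scripts); the data_yield_ratio column name and the toy data frame are invented for the example.

```python
import numpy as np
import pandas as pd

def clean_features(features: pd.DataFrame, yield_column: str = "data_yield_ratio") -> pd.DataFrame:
    # 1. Discard days with a data yield hour ratio <= 0.75 (keep days with at least 18 valid hours)
    features = features[features[yield_column] > 0.75]
    # 2. Drop feature columns with more than 30% missing rows
    features = features.loc[:, features.isna().mean() <= 0.30]
    # 3. Drop feature columns with zero variance
    numeric = features.select_dtypes(include=np.number)
    features = features.drop(columns=numeric.columns[numeric.nunique() <= 1])
    # 4. Drop rows (days) with more than 30% missing columns
    return features[features.isna().mean(axis=1) <= 0.30]

toy = pd.DataFrame({
    "data_yield_ratio": [0.9, 0.6, 0.8],
    "calls_count": [3, np.nan, 5],
    "constant_feature": [1, 1, 1],
})
print(clean_features(toy))
```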
Note that the difference in the number of features between p01 and p02 is mostly due to iOS restrictions that stops researchers from collecting the same number of sensors than in Android phones. Feature cleaning for the individual models is done in the clean_sensor_features_for_individual_participants rule and for the population model in the clean_sensor_features_for_all_participants rule in rules/models.smk . 7. Merge features and targets. In this step we merge the cleaned features and target labels for our individual models in the merge_features_and_targets_for_individual_model rule in rules/models.smk . Additionally, we merge the cleaned features, target labels, and demographic features of our two participants for the population model in the merge_features_and_targets_for_population_model rule in rules/models.smk . These two merged files are the input for our individual and population models. 8. Modelling. This stage has three phases: model building, training and evaluation. In the building phase we impute, normalize and oversample our dataset. Missing numeric values in each column are imputed with their mean and we impute missing categorical values with their mode. We normalize each numeric column with one of three strategies (min-max, z-score, and scikit-learn package\u2019s robust scaler) and we one-hot encode each categorial feature as a numerical array. We oversample our imbalanced dataset using SMOTE (Synthetic Minority Over-sampling Technique) or a Random Over sampler from scikit-learn. All these parameters are exposed in example_profile/example_config.yaml . In the training phase, we create eight models: logistic regression, k-nearest neighbors, support vector machine, decision tree, random forest, gradient boosting classifier, extreme gradient boosting classifier and a light gradient boosting machine. We cross-validate each model with an inner cycle to tune hyper-parameters based on the Macro F1 score and an outer cycle to predict the test set on a model with the best hyper-parameters. Both cross-validation cycles use a leave-one-out strategy. Parameters for each model like weights and learning rates are exposed in example_profile/example_config.yaml . Finally, in the evaluation phase we compute the accuracy, Macro F1, kappa, area under the curve and per class precision, recall and F1 score of all folds of the outer cross-validation cycle. Refer to the modelling_for_individual_participants rule for the individual modeling and to the modelling_for_all_participants rule for the population modeling, both in rules/models.smk . 9. Compute model baselines. We create three baselines to evaluate our classification models. First, a majority classifier that labels each test sample with the majority class of our training data. Second, a random weighted classifier that predicts each test observation sampling at random from a binomial distribution based on the ratio of our target labels. Third, a decision tree classifier based solely on the demographic features of each participant. As we do not have demographic features for individual model, this baseline is only available for population model. Our baseline metrics (e.g. accuracy, precision, etc.) are saved into a CSV file, ready to be compared to our modeling results. 
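For reference, the first two baselines correspond closely to scikit-learn's DummyClassifier strategies, and the third would be an ordinary DecisionTreeClassifier fit only on the demographic columns. The snippet below is a hedged illustration on made-up labels, not the code in rules/models.smk.

```python
import numpy as np
from sklearn.dummy import DummyClassifier

y_train = np.array([0, 0, 0, 1, 1])   # stand-in for low/high symptom burden labels
X_train = np.zeros((5, 1))            # these baselines ignore the features
X_test = np.zeros((3, 1))

# Baseline 1: always predict the majority class seen in training
majority = DummyClassifier(strategy="most_frequent").fit(X_train, y_train)
# Baseline 2: predict at random following the training label proportions
weighted_random = DummyClassifier(strategy="stratified", random_state=0).fit(X_train, y_train)

print(majority.predict(X_test), weighted_random.predict(X_test))
```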
Refer to the baselines_for_individual_model rule for the individual model baselines and to the baselines_for_population_model rule for population model baselines, both in rules/models.smk .","title":"Modules of our analysis workflow example"},{"location":"workflow-examples/minimal/","text":"Minimal Working Example \u00b6 This is a quick guide for creating and running a simple pipeline to extract missing, outgoing, and incoming call features for daily and night epochs of one participant monitored on the US East coast. Install RAPIDS and make sure your conda environment is active (see Installation ) Make the changes listed below for the corresponding Configuration step (we provide an example of what the relevant sections in your config.yml will look like after you are done) Things to change on each configuration step 1. Setup your database connection credentials in .env . We assume your credentials group is called MY_GROUP . 2. America/New_York should be the default timezone 3. Create a participant file p01.yaml based on one of your participants and add p01 to [PIDS] in config.yaml . The following would be the content of your p01.yaml participant file: PHONE : DEVICE_IDS : [ aaaaaaaa-1111-bbbb-2222-cccccccccccc ] # your participant's AWARE device id PLATFORMS : [ android ] # or ios LABEL : MyTestP01 # any string START_DATE : 2020-01-01 # this can also be empty END_DATE : 2021-01-01 # this can also be empty 4. [TIME_SEGMENTS][TYPE] should be the default PERIODIC . Change [TIME_SEGMENTS][FILE] with the path of a file containing the following lines: label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 5. If you collected data with AWARE you won\u2019t need to modify the attributes of [DEVICE_DATA][PHONE] 6. Set [PHONE_CALLS][PROVIDERS][RAPIDS][COMPUTE] to True Example of the config.yaml sections after the changes outlined above PIDS: [p01] TIMEZONE: &timezone America/New_York DATABASE_GROUP: &database_group MY_GROUP # ... other irrelevant sections TIME_SEGMENTS: &time_segments TYPE: PERIODIC FILE: \"data/external/timesegments_periodic.csv\" # make sure the three lines specified above are in the file INCLUDE_PAST_PERIODIC_SEGMENTS: FALSE # No need to change this if you collected AWARE data on a database and your credentials are grouped under `MY_GROUP` in `.env` DEVICE_DATA: PHONE: SOURCE: TYPE: DATABASE DATABASE_GROUP: *database_group DEVICE_ID_COLUMN: device_id # column name TIMEZONE: TYPE: SINGLE # SINGLE or MULTIPLE VALUE: *timezone ############## PHONE ########################################################### ################################################################################ # ... other irrelevant sections # Communication call features config, TYPES and FEATURES keys need to match PHONE_CALLS: TABLE: calls # change if your calls table has a different name PROVIDERS: RAPIDS: COMPUTE: True # set this to True! CALL_TYPES: ... Run RAPIDS ./rapids -j1 The call features for daily and morning time segments will be in /data/processed/features/p01/phone_calls.csv","title":"Minimal"},{"location":"workflow-examples/minimal/#minimal-working-example","text":"This is a quick guide for creating and running a simple pipeline to extract missing, outgoing, and incoming call features for daily and night epochs of one participant monitored on the US East coast. 
Install RAPIDS and make sure your conda environment is active (see Installation ) Make the changes listed below for the corresponding Configuration step (we provide an example of what the relevant sections in your config.yml will look like after you are done) Things to change on each configuration step 1. Setup your database connection credentials in .env . We assume your credentials group is called MY_GROUP . 2. America/New_York should be the default timezone 3. Create a participant file p01.yaml based on one of your participants and add p01 to [PIDS] in config.yaml . The following would be the content of your p01.yaml participant file: PHONE : DEVICE_IDS : [ aaaaaaaa-1111-bbbb-2222-cccccccccccc ] # your participant's AWARE device id PLATFORMS : [ android ] # or ios LABEL : MyTestP01 # any string START_DATE : 2020-01-01 # this can also be empty END_DATE : 2021-01-01 # this can also be empty 4. [TIME_SEGMENTS][TYPE] should be the default PERIODIC . Change [TIME_SEGMENTS][FILE] with the path of a file containing the following lines: label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 5. If you collected data with AWARE you won\u2019t need to modify the attributes of [DEVICE_DATA][PHONE] 6. Set [PHONE_CALLS][PROVIDERS][RAPIDS][COMPUTE] to True Example of the config.yaml sections after the changes outlined above PIDS: [p01] TIMEZONE: &timezone America/New_York DATABASE_GROUP: &database_group MY_GROUP # ... other irrelevant sections TIME_SEGMENTS: &time_segments TYPE: PERIODIC FILE: \"data/external/timesegments_periodic.csv\" # make sure the three lines specified above are in the file INCLUDE_PAST_PERIODIC_SEGMENTS: FALSE # No need to change this if you collected AWARE data on a database and your credentials are grouped under `MY_GROUP` in `.env` DEVICE_DATA: PHONE: SOURCE: TYPE: DATABASE DATABASE_GROUP: *database_group DEVICE_ID_COLUMN: device_id # column name TIMEZONE: TYPE: SINGLE # SINGLE or MULTIPLE VALUE: *timezone ############## PHONE ########################################################### ################################################################################ # ... other irrelevant sections # Communication call features config, TYPES and FEATURES keys need to match PHONE_CALLS: TABLE: calls # change if your calls table has a different name PROVIDERS: RAPIDS: COMPUTE: True # set this to True! CALL_TYPES: ... Run RAPIDS ./rapids -j1 The call features for daily and morning time segments will be in /data/processed/features/p01/phone_calls.csv","title":"Minimal Working Example"}]} \ No newline at end of file diff --git a/0.2/setup/configuration/index.html b/0.2/setup/configuration/index.html index 503aa1e9..46269b92 100644 --- a/0.2/setup/configuration/index.html +++ b/0.2/setup/configuration/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/setup/execution/index.html b/0.2/setup/execution/index.html index 48c9a173..0dcaffa2 100644 --- a/0.2/setup/execution/index.html +++ b/0.2/setup/execution/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
-
+ +
-
+
+
+
+
+
+ - - - - + + - - \ No newline at end of file diff --git a/0.2/setup/installation/index.html b/0.2/setup/installation/index.html index c1de499c..92be7c2c 100644 --- a/0.2/setup/installation/index.html +++ b/0.2/setup/installation/index.html @@ -9,8 +9,13 @@ - - + + + + + + + @@ -18,20 +23,21 @@ - + - + + - + - + @@ -55,7 +61,9 @@ - + + + @@ -72,27 +80,44 @@ + + +
[minified asset and markup changes not preserved in this extract]
\ No newline at end of file
diff --git a/0.2/sitemap.xml b/0.2/sitemap.xml
index 0e84b3ba..b04f2fc2 100644
--- a/0.2/sitemap.xml
+++ b/0.2/sitemap.xml
@@ -1,171 +1,171 @@
- None - 2020-12-18 + https://www.rapids.science/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/file-structure/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/setup/installation/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/setup/configuration/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/setup/execution/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/workflow-examples/minimal/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/workflow-examples/analysis/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/feature-introduction/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-accelerometer/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-activity-recognition/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-applications-foreground/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-battery/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-bluetooth/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-calls/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-conversation/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-data-yield/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-light/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-locations/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-messages/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-screen/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-wifi-connected/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/phone-wifi-visible/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/fitbit-heartrate-summary/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/fitbit-heartrate-intraday/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/fitbit-sleep-summary/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/fitbit-steps-summary/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/fitbit-steps-intraday/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/features/add-new-features/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/visualizations/data-quality-visualizations/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/visualizations/feature-visualizations/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/developers/git-flow/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/developers/remote-support/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/developers/virtual-environments/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/developers/documentation/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/developers/testing/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/developers/test-cases/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/migrating-from-old-versions/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/code_of_conduct/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/faq/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/team/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/change-log/ + 2021-03-29 daily
- None - 2020-12-18 + https://www.rapids.science/citation/ + 2021-03-29 daily
\ No newline at end of file
diff --git a/0.2/sitemap.xml.gz b/0.2/sitemap.xml.gz
index c19b574dd988fde509aff3ba780af6142e13728f..413f7628c5fa7ac5b5137b149ffc5c483dd3be0b 100644
GIT binary patch
[base85-encoded binary payload (literal 597 / literal 216) not reproduced in this extract]
[diff for 0.2/team/index.html: header and markup changes not preserved in this extract]
\ No newline at end of file
diff --git a/0.2/visualizations/data-quality-visualizations/index.html b/0.2/visualizations/data-quality-visualizations/index.html
index 0e7b800f..326cbaed 100644
--- a/0.2/visualizations/data-quality-visualizations/index.html
+++ b/0.2/visualizations/data-quality-visualizations/index.html
[minified asset and markup changes not preserved in this extract]
\ No newline at end of file
diff --git a/0.2/visualizations/feature-visualizations/index.html b/0.2/visualizations/feature-visualizations/index.html
index 7d1b93ec..e16ada06 100644
--- a/0.2/visualizations/feature-visualizations/index.html
+++ b/0.2/visualizations/feature-visualizations/index.html
[minified asset and markup changes not preserved in this extract]
\ No newline at end of file
diff --git a/0.2/workflow-examples/analysis/index.html b/0.2/workflow-examples/analysis/index.html
index b9579f5c..d3459f6f 100644
--- a/0.2/workflow-examples/analysis/index.html
+++ b/0.2/workflow-examples/analysis/index.html
[minified asset and markup changes not preserved in this extract]
\ No newline at end of file
diff --git a/0.2/workflow-examples/minimal/index.html b/0.2/workflow-examples/minimal/index.html
index 90a06d80..e1225066 100644
--- a/0.2/workflow-examples/minimal/index.html
+++ b/0.2/workflow-examples/minimal/index.html
[minified asset and markup changes not preserved in this extract]
\ No newline at end of file