diff --git a/1.3/404.html b/1.3/404.html new file mode 100644 index 00000000..612948c5 --- /dev/null +++ b/1.3/404.html @@ -0,0 +1,1765 @@ + + + + + + + + + + + + + + + + + + + + RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ +

404 - Not found

+ + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/CNAME b/1.3/CNAME new file mode 100644 index 00000000..17ba96bf --- /dev/null +++ b/1.3/CNAME @@ -0,0 +1 @@ +www.rapids.science \ No newline at end of file diff --git a/1.3/assets/images/favicon.png b/1.3/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/1.3/assets/images/favicon.png differ diff --git a/1.3/assets/javascripts/bundle.0c4ae912.min.js b/1.3/assets/javascripts/bundle.0c4ae912.min.js new file mode 100644 index 00000000..f96c24c9 --- /dev/null +++ b/1.3/assets/javascripts/bundle.0c4ae912.min.js @@ -0,0 +1,108 @@ +(()=>{var oa=Object.create,St=Object.defineProperty;var na=Object.getOwnPropertyDescriptor;var ia=Object.getOwnPropertyNames,wt=Object.getOwnPropertySymbols,aa=Object.getPrototypeOf,ar=Object.prototype.hasOwnProperty,Kr=Object.prototype.propertyIsEnumerable;var Br=(e,t,r)=>t in e?St(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))ar.call(t,r)&&Br(e,r,t[r]);if(wt)for(var r of wt(t))Kr.call(t,r)&&Br(e,r,t[r]);return e};var sa=e=>St(e,"__esModule",{value:!0});var Yr=(e,t)=>{var r={};for(var o in e)ar.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&wt)for(var o of wt(e))t.indexOf(o)<0&&Kr.call(e,o)&&(r[o]=e[o]);return r};var Et=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var ca=(e,t,r)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of ia(t))!ar.call(e,o)&&o!=="default"&&St(e,o,{get:()=>t[o],enumerable:!(r=na(t,o))||r.enumerable});return e},ct=e=>ca(sa(St(e!=null?oa(aa(e)):{},"default",e&&e.__esModule&&"default"in e?{get:()=>e.default,enumerable:!0}:{value:e,enumerable:!0})),e);var Jr=Et((sr,Gr)=>{(function(e,t){typeof sr=="object"&&typeof Gr!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(sr,function(){"use strict";function e(r){var 
o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(T){return!!(T&&T!==document&&T.nodeName!=="HTML"&&T.nodeName!=="BODY"&&"classList"in T&&"contains"in T.classList)}function c(T){var De=T.type,_e=T.tagName;return!!(_e==="INPUT"&&a[De]&&!T.readOnly||_e==="TEXTAREA"&&!T.readOnly||T.isContentEditable)}function l(T){T.classList.contains("focus-visible")||(T.classList.add("focus-visible"),T.setAttribute("data-focus-visible-added",""))}function p(T){!T.hasAttribute("data-focus-visible-added")||(T.classList.remove("focus-visible"),T.removeAttribute("data-focus-visible-added"))}function m(T){T.metaKey||T.altKey||T.ctrlKey||(s(r.activeElement)&&l(r.activeElement),o=!0)}function f(T){o=!1}function d(T){!s(T.target)||(o||c(T.target))&&l(T.target)}function v(T){!s(T.target)||(T.target.classList.contains("focus-visible")||T.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),p(T.target))}function h(T){document.visibilityState==="hidden"&&(n&&(o=!0),U())}function U(){document.addEventListener("mousemove",P),document.addEventListener("mousedown",P),document.addEventListener("mouseup",P),document.addEventListener("pointermove",P),document.addEventListener("pointerdown",P),document.addEventListener("pointerup",P),document.addEventListener("touchmove",P),document.addEventListener("touchstart",P),document.addEventListener("touchend",P)}function Y(){document.removeEventListener("mousemove",P),document.removeEventListener("mousedown",P),document.removeEventListener("mouseup",P),document.removeEventListener("pointermove",P),document.removeEventListener("pointerdown",P),document.removeEventListener("pointerup",P),document.removeEventListener("touchmove",P),document.removeEventListener("touchstart",P),document.removeEventListener("touchend",P)}function 
P(T){T.target.nodeName&&T.target.nodeName.toLowerCase()==="html"||(o=!1,Y())}document.addEventListener("keydown",m,!0),document.addEventListener("mousedown",f,!0),document.addEventListener("pointerdown",f,!0),document.addEventListener("touchstart",f,!0),document.addEventListener("visibilitychange",h,!0),U(),r.addEventListener("focus",d,!0),r.addEventListener("blur",v,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var xo=Et((hs,_t)=>{var Xr,Zr,eo,to,ro,oo,no,io,ao,Tt,cr,so,co,lo,Be,po,uo,fo,mo,ho,bo,vo,go,Ot;(function(e){var t=typeof global=="object"?global:typeof self=="object"?self:typeof this=="object"?this:{};typeof define=="function"&&define.amd?define("tslib",["exports"],function(o){e(r(t,r(o)))}):typeof _t=="object"&&typeof _t.exports=="object"?e(r(t,r(_t.exports))):e(r(t));function r(o,n){return o!==t&&(typeof Object.create=="function"?Object.defineProperty(o,"__esModule",{value:!0}):o.__esModule=!0),function(i,a){return o[i]=n?n(i,a):a}}})(function(e){var t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(o,n){o.__proto__=n}||function(o,n){for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(o[i]=n[i])};Xr=function(o,n){if(typeof n!="function"&&n!==null)throw new TypeError("Class extends value "+String(n)+" is not a constructor or null");t(o,n);function i(){this.constructor=o}o.prototype=n===null?Object.create(n):(i.prototype=n.prototype,new i)},Zr=Object.assign||function(o){for(var 
n,i=1,a=arguments.length;i=0;p--)(l=o[p])&&(c=(s<3?l(c):s>3?l(n,i,c):l(n,i))||c);return s>3&&c&&Object.defineProperty(n,i,c),c},ro=function(o,n){return function(i,a){n(i,a,o)}},oo=function(o,n){if(typeof Reflect=="object"&&typeof Reflect.metadata=="function")return Reflect.metadata(o,n)},no=function(o,n,i,a){function s(c){return c instanceof i?c:new i(function(l){l(c)})}return new(i||(i=Promise))(function(c,l){function p(d){try{f(a.next(d))}catch(v){l(v)}}function m(d){try{f(a.throw(d))}catch(v){l(v)}}function f(d){d.done?c(d.value):s(d.value).then(p,m)}f((a=a.apply(o,n||[])).next())})},io=function(o,n){var i={label:0,sent:function(){if(c[0]&1)throw c[1];return c[1]},trys:[],ops:[]},a,s,c,l;return l={next:p(0),throw:p(1),return:p(2)},typeof Symbol=="function"&&(l[Symbol.iterator]=function(){return this}),l;function p(f){return function(d){return m([f,d])}}function m(f){if(a)throw new TypeError("Generator is already executing.");for(;i;)try{if(a=1,s&&(c=f[0]&2?s.return:f[0]?s.throw||((c=s.return)&&c.call(s),0):s.next)&&!(c=c.call(s,f[1])).done)return c;switch(s=0,c&&(f=[f[0]&2,c.value]),f[0]){case 0:case 1:c=f;break;case 4:return i.label++,{value:f[1],done:!1};case 5:i.label++,s=f[1],f=[0];continue;case 7:f=i.ops.pop(),i.trys.pop();continue;default:if(c=i.trys,!(c=c.length>0&&c[c.length-1])&&(f[0]===6||f[0]===2)){i=0;continue}if(f[0]===3&&(!c||f[1]>c[0]&&f[1]=o.length&&(o=void 0),{value:o&&o[a++],done:!o}}};throw new TypeError(n?"Object is not iterable.":"Symbol.iterator is not defined.")},cr=function(o,n){var i=typeof Symbol=="function"&&o[Symbol.iterator];if(!i)return o;var a=i.call(o),s,c=[],l;try{for(;(n===void 0||n-- >0)&&!(s=a.next()).done;)c.push(s.value)}catch(p){l={error:p}}finally{try{s&&!s.done&&(i=a.return)&&i.call(a)}finally{if(l)throw l.error}}return c},so=function(){for(var o=[],n=0;n1||p(h,U)})})}function p(h,U){try{m(a[h](U))}catch(Y){v(c[0][3],Y)}}function m(h){h.value instanceof Be?Promise.resolve(h.value.v).then(f,d):v(c[0][2],h)}function 
f(h){p("next",h)}function d(h){p("throw",h)}function v(h,U){h(U),c.shift(),c.length&&p(c[0][0],c[0][1])}},uo=function(o){var n,i;return n={},a("next"),a("throw",function(s){throw s}),a("return"),n[Symbol.iterator]=function(){return this},n;function a(s,c){n[s]=o[s]?function(l){return(i=!i)?{value:Be(o[s](l)),done:s==="return"}:c?c(l):l}:c}},fo=function(o){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var n=o[Symbol.asyncIterator],i;return n?n.call(o):(o=typeof Tt=="function"?Tt(o):o[Symbol.iterator](),i={},a("next"),a("throw"),a("return"),i[Symbol.asyncIterator]=function(){return this},i);function a(c){i[c]=o[c]&&function(l){return new Promise(function(p,m){l=o[c](l),s(p,m,l.done,l.value)})}}function s(c,l,p,m){Promise.resolve(m).then(function(f){c({value:f,done:p})},l)}},mo=function(o,n){return Object.defineProperty?Object.defineProperty(o,"raw",{value:n}):o.raw=n,o};var r=Object.create?function(o,n){Object.defineProperty(o,"default",{enumerable:!0,value:n})}:function(o,n){o.default=n};ho=function(o){if(o&&o.__esModule)return o;var n={};if(o!=null)for(var i in o)i!=="default"&&Object.prototype.hasOwnProperty.call(o,i)&&Ot(n,o,i);return r(n,o),n},bo=function(o){return o&&o.__esModule?o:{default:o}},vo=function(o,n){if(!n.has(o))throw new TypeError("attempted to get private field on non-instance");return n.get(o)},go=function(o,n,i){if(!n.has(o))throw new TypeError("attempted to set private field on non-instance");return 
n.set(o,i),i},e("__extends",Xr),e("__assign",Zr),e("__rest",eo),e("__decorate",to),e("__param",ro),e("__metadata",oo),e("__awaiter",no),e("__generator",io),e("__exportStar",ao),e("__createBinding",Ot),e("__values",Tt),e("__read",cr),e("__spread",so),e("__spreadArrays",co),e("__spreadArray",lo),e("__await",Be),e("__asyncGenerator",po),e("__asyncDelegator",uo),e("__asyncValues",fo),e("__makeTemplateObject",mo),e("__importStar",ho),e("__importDefault",bo),e("__classPrivateFieldGet",vo),e("__classPrivateFieldSet",go)})});var Fr=Et((xt,Rr)=>{(function(t,r){typeof xt=="object"&&typeof Rr=="object"?Rr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof xt=="object"?xt.ClipboardJS=r():t.ClipboardJS=r()})(xt,function(){return function(){var e={134:function(o,n,i){"use strict";i.d(n,{default:function(){return ta}});var a=i(279),s=i.n(a),c=i(370),l=i.n(c),p=i(817),m=i.n(p);function f(A){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?f=function(b){return typeof b}:f=function(b){return b&&typeof Symbol=="function"&&b.constructor===Symbol&&b!==Symbol.prototype?"symbol":typeof b},f(A)}function d(A,g){if(!(A instanceof g))throw new TypeError("Cannot call a class as a function")}function v(A,g){for(var b=0;b0&&arguments[0]!==void 0?arguments[0]:{};this.action=b.action,this.container=b.container,this.emitter=b.emitter,this.target=b.target,this.text=b.text,this.trigger=b.trigger,this.selectedText=""}},{key:"initSelection",value:function(){this.text?this.selectFake():this.target&&this.selectTarget()}},{key:"createFakeElement",value:function(){var b=document.documentElement.getAttribute("dir")==="rtl";this.fakeElem=document.createElement("textarea"),this.fakeElem.style.fontSize="12pt",this.fakeElem.style.border="0",this.fakeElem.style.padding="0",this.fakeElem.style.margin="0",this.fakeElem.style.position="absolute",this.fakeElem.style[b?"right":"left"]="-9999px";var C=window.pageYOffset||document.documentElement.scrollTop;return 
this.fakeElem.style.top="".concat(C,"px"),this.fakeElem.setAttribute("readonly",""),this.fakeElem.value=this.text,this.fakeElem}},{key:"selectFake",value:function(){var b=this,C=this.createFakeElement();this.fakeHandlerCallback=function(){return b.removeFake()},this.fakeHandler=this.container.addEventListener("click",this.fakeHandlerCallback)||!0,this.container.appendChild(C),this.selectedText=m()(C),this.copyText(),this.removeFake()}},{key:"removeFake",value:function(){this.fakeHandler&&(this.container.removeEventListener("click",this.fakeHandlerCallback),this.fakeHandler=null,this.fakeHandlerCallback=null),this.fakeElem&&(this.container.removeChild(this.fakeElem),this.fakeElem=null)}},{key:"selectTarget",value:function(){this.selectedText=m()(this.target),this.copyText()}},{key:"copyText",value:function(){var b;try{b=document.execCommand(this.action)}catch(C){b=!1}this.handleResult(b)}},{key:"handleResult",value:function(b){this.emitter.emit(b?"success":"error",{action:this.action,text:this.selectedText,trigger:this.trigger,clearSelection:this.clearSelection.bind(this)})}},{key:"clearSelection",value:function(){this.trigger&&this.trigger.focus(),document.activeElement.blur(),window.getSelection().removeAllRanges()}},{key:"destroy",value:function(){this.removeFake()}},{key:"action",set:function(){var b=arguments.length>0&&arguments[0]!==void 0?arguments[0]:"copy";if(this._action=b,this._action!=="copy"&&this._action!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"')},get:function(){return this._action}},{key:"target",set:function(b){if(b!==void 0)if(b&&f(b)==="object"&&b.nodeType===1){if(this.action==="copy"&&b.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(this.action==="cut"&&(b.hasAttribute("readonly")||b.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`);this._target=b}else throw new Error('Invalid "target" value, use a valid Element')},get:function(){return this._target}}]),A}(),Y=U;function P(A){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?P=function(b){return typeof b}:P=function(b){return b&&typeof Symbol=="function"&&b.constructor===Symbol&&b!==Symbol.prototype?"symbol":typeof b},P(A)}function T(A,g){if(!(A instanceof g))throw new TypeError("Cannot call a class as a function")}function De(A,g){for(var b=0;b0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof W.action=="function"?W.action:this.defaultAction,this.target=typeof W.target=="function"?W.target:this.defaultTarget,this.text=typeof W.text=="function"?W.text:this.defaultText,this.container=P(W.container)==="object"?W.container:document.body}},{key:"listenClick",value:function(W){var re=this;this.listener=l()(W,"click",function(st){return re.onClick(st)})}},{key:"onClick",value:function(W){var re=W.delegateTarget||W.currentTarget;this.clipboardAction&&(this.clipboardAction=null),this.clipboardAction=new Y({action:this.action(re),target:this.target(re),text:this.text(re),container:this.container,trigger:re,emitter:this})}},{key:"defaultAction",value:function(W){return ir("action",W)}},{key:"defaultTarget",value:function(W){var re=ir("target",W);if(re)return document.querySelector(re)}},{key:"defaultText",value:function(W){return ir("text",W)}},{key:"destroy",value:function(){this.listener.destroy(),this.clipboardAction&&(this.clipboardAction.destroy(),this.clipboardAction=null)}}],[{key:"isSupported",value:function(){var W=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],re=typeof W=="string"?[W]:W,st=!!document.queryCommandSupported;return re.forEach(function(ra){st=st&&!!document.queryCommandSupported(ra)}),st}}]),b}(s()),ta=ea},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var 
i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,c){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(c))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(p,m,f,d,v){var h=l.apply(this,arguments);return p.addEventListener(f,h,v),{destroy:function(){p.removeEventListener(f,h,v)}}}function c(p,m,f,d,v){return typeof p.addEventListener=="function"?s.apply(null,arguments):typeof f=="function"?s.bind(null,document).apply(null,arguments):(typeof p=="string"&&(p=document.querySelectorAll(p)),Array.prototype.map.call(p,function(h){return s(h,m,f,d,v)}))}function l(p,m,f,d){return function(v){v.delegateTarget=a(v.target,m),v.delegateTarget&&d.call(p,v)}}o.exports=c},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function c(f,d,v){if(!f&&!d&&!v)throw new Error("Missing required arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(v))throw new TypeError("Third argument must be a Function");if(a.node(f))return l(f,d,v);if(a.nodeList(f))return p(f,d,v);if(a.string(f))return m(f,d,v);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function l(f,d,v){return f.addEventListener(d,v),{destroy:function(){f.removeEventListener(d,v)}}}function p(f,d,v){return 
Array.prototype.forEach.call(f,function(h){h.addEventListener(d,v)}),{destroy:function(){Array.prototype.forEach.call(f,function(h){h.removeEventListener(d,v)})}}}function m(f,d,v){return s(document.body,f,d,v)}o.exports=c},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var c=window.getSelection(),l=document.createRange();l.selectNodeContents(i),c.removeAllRanges(),c.addRange(l),a=c.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var c=this.e||(this.e={});return(c[i]||(c[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var c=this;function l(){c.off(i,l),a.apply(s,arguments)}return l._=a,this.on(i,l,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),c=0,l=s.length;for(c;c{"use strict";var Za=/["'&<>]/;wi.exports=es;function es(e){var t=""+e,r=Za.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=o.hasError,i=o.isStopped,a=o.observers;return n||i?lr:(a.push(r),new le(function(){return Me(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new k;return r.source=this,r},t.create=function(r,o){return new Ho(r,o)},t}(k);var Ho=function(e){X(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return 
t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:lr},t}(O);var ut={now:function(){return(ut.delegate||Date).now()},delegate:void 0};var ft=function(e){X(t,e);function t(r,o,n){r===void 0&&(r=Infinity),o===void 0&&(o=Infinity),n===void 0&&(n=ut);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===Infinity,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,c=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+c)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),c=0;c0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=Ge.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){if(n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);r.actions.length===0&&(Ge.cancelAnimationFrame(o),r._scheduled=void 0)},t}(jt);var Fo=function(e){X(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0,this._scheduled=void 0;var o=this.actions,n,i=-1;r=r||o.shift();var a=o.length;do 
if(n=r.execute(r.state,r.delay))break;while(++i=2,!0))}function se(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new O}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,c=s===void 0?!0:s;return function(l){var p=null,m=null,f=null,d=0,v=!1,h=!1,U=function(){m==null||m.unsubscribe(),m=null},Y=function(){U(),p=f=null,v=h=!1},P=function(){var T=p;Y(),T==null||T.unsubscribe()};return x(function(T,De){d++,!h&&!v&&U();var _e=f=f!=null?f:r();De.add(function(){d--,d===0&&!h&&!v&&(m=_r(P,c))}),_e.subscribe(De),p||(p=new pt({next:function(Ke){return _e.next(Ke)},error:function(Ke){h=!0,U(),m=_r(Y,n,Ke),_e.error(Ke)},complete:function(){v=!0,U(),m=_r(Y,a),_e.complete()}}),we(T).subscribe(p))})(l)}}function _r(e,t){for(var r=[],o=2;ot==="focus"),N(e===Ne()))}var on=new O,Fa=de(()=>M(new ResizeObserver(e=>{for(let t of e)on.next(t)}))).pipe(S(e=>J.pipe(N(e)).pipe(j(()=>e.disconnect()))),ee(1));function Se(e){return{width:e.offsetWidth,height:e.offsetHeight}}function vt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function nn(e){let t=e.parentElement;for(;t&&t!==e.offsetParent;){let r=Se(t);if(vt(t).height>r.height)return t;t=t.parentElement}}function Ve(e){return Fa.pipe(_(t=>t.observe(e)),S(t=>on.pipe(L(({target:r})=>r===e),j(()=>t.unobserve(e)),u(()=>Se(e)))),N(Se(e)))}function an(e){return{x:e.scrollLeft,y:e.scrollTop}}function Ia(e){return V(y(e,"scroll"),y(window,"resize")).pipe(u(()=>an(e)),N(an(e)))}function sn(e,t=16){return Ia(e).pipe(u(({y:r})=>{let o=Se(e),n=vt(e);return r>=n.height-o.height-t}),q())}function cn(e){if(e instanceof HTMLInputElement)e.select();else throw new Error("Not implemented")}var Bt={drawer:ae("[data-md-toggle=drawer]"),search:ae("[data-md-toggle=search]")};function ln(e){return Bt[e].checked}function Ue(e,t){Bt[e].checked!==t&&Bt[e].click()}function Yt(e){let t=Bt[e];return y(t,"change").pipe(u(()=>t.checked),N(t.checked))}function 
Pa(e){switch(e.tagName){case"INPUT":case"SELECT":case"TEXTAREA":return!0;default:return e.isContentEditable}}function pn(){return y(window,"keydown").pipe(L(e=>!(e.metaKey||e.ctrlKey)),u(e=>({mode:ln("search")?"search":"global",type:e.key,claim(){e.preventDefault(),e.stopPropagation()}})),L(({mode:e})=>{if(e==="global"){let t=Ne();if(typeof t!="undefined")return!Pa(t)}return!0}),se())}function ve(){return new URL(location.href)}function Gt(e){location.href=e.href}function un(){return new O}function fn(){return location.hash.substring(1)}function mn(e){let t=We("a");t.href=e,t.addEventListener("click",r=>r.stopPropagation()),t.click()}function $a(){return y(window,"hashchange").pipe(u(fn),N(fn()),L(e=>e.length>0),se())}function dn(){return $a().pipe(S(e=>M(pe(`[id="${e}"]`))))}function gt(e){let t=matchMedia(e);return Qt(r=>t.addListener(()=>r(t.matches))).pipe(N(t.matches))}function hn(){return y(window,"beforeprint").pipe(B(void 0))}function Hr(e,t){return e.pipe(S(r=>r?t():J))}function Jt(e,t={credentials:"same-origin"}){return we(fetch(`${e}`,t)).pipe(L(r=>r.status===200))}function Te(e,t){return Jt(e,t).pipe(S(r=>r.json()),ee(1))}function bn(e,t){let r=new DOMParser;return Jt(e,t).pipe(S(o=>o.text()),u(o=>r.parseFromString(o,"text/xml")),ee(1))}function vn(e){let t=We("script");return t.src=e,de(()=>(document.head.appendChild(t),V(y(t,"load"),y(t,"error").pipe(S(()=>gr(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(B(void 0),j(()=>document.head.removeChild(t)),oe(1))))}function gn(){return{x:Math.max(0,pageXOffset),y:Math.max(0,pageYOffset)}}function jr({x:e,y:t}){window.scrollTo(e||0,t||0)}function xn(){return V(y(window,"scroll",{passive:!0}),y(window,"resize",{passive:!0})).pipe(u(gn),N(gn()))}function yn(){return{width:innerWidth,height:innerHeight}}function Sn(){return y(window,"resize",{passive:!0}).pipe(u(yn),N(yn()))}function wn(){return z([xn(),Sn()]).pipe(u(([e,t])=>({offset:e,size:t})),ee(1))}function Xt(e,{viewport$:t,header$:r}){let 
o=t.pipe(D("size")),n=z([o,r]).pipe(u(()=>({x:e.offsetLeft,y:e.offsetTop})));return z([r,t,n]).pipe(u(([{height:i},{offset:a,size:s},{x:c,y:l}])=>({offset:{x:a.x-c,y:a.y-l+i},size:s})))}function En(e,{tx$:t}){let r=y(e,"message").pipe(u(({data:o})=>o));return t.pipe(kr(()=>r,{leading:!0,trailing:!0}),_(o=>e.postMessage(o)),Lr(r),se())}var Wa=ae("#__config"),ot=JSON.parse(Wa.textContent);ot.base=new URL(ot.base,ve()).toString().replace(/\/$/,"");function ue(){return ot}function ce(e){return ot.features.includes(e)}function te(e,t){return typeof t!="undefined"?ot.translations[e].replace("#",t.toString()):ot.translations[e]}function Oe(e,t=document){return ae(`[data-md-component=${e}]`,t)}function ne(e,t=document){return I(`[data-md-component=${e}]`,t)}var ai=ct(Fr());function Tn(e,t=0){e.setAttribute("tabindex",t.toString())}function On(e){e.removeAttribute("tabindex")}function _n(e,t){e.setAttribute("data-md-state","lock"),e.style.top=`-${t}px`}function Mn(e){let t=-1*parseInt(e.style.top,10);e.removeAttribute("data-md-state"),e.style.top="",t&&window.scrollTo(0,t)}function Ln(e,t){e.setAttribute("data-md-state",t)}function An(e){e.removeAttribute("data-md-state")}function kn(e,t){e.classList.toggle("md-nav__link--active",t)}function Cn(e){e.classList.remove("md-nav__link--active")}function Hn(e,t){e.firstElementChild.innerHTML=t}function jn(e,t){e.setAttribute("data-md-state",t)}function Rn(e){e.removeAttribute("data-md-state")}function Fn(e,t){e.setAttribute("data-md-state",t)}function In(e){e.removeAttribute("data-md-state")}function Pn(e,t){e.setAttribute("data-md-state",t)}function $n(e){e.removeAttribute("data-md-state")}function Wn(e,t){e.placeholder=t}function Vn(e){e.placeholder=te("search.placeholder")}function Un(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Un(e,r)}function $(e,t,...r){let o=document.createElement(e);if(t)for(let n of 
Object.keys(t))typeof t[n]!="boolean"?o.setAttribute(n,t[n]):t[n]&&o.setAttribute(n,"");for(let n of r)Un(o,n);return o}function Dn(e,t){let r=t;if(e.length>r){for(;e[r]!==" "&&--r>0;);return`${e.substring(0,r)}...`}return e}function Zt(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function Nn(e,t){switch(t){case 0:e.textContent=te("search.result.none");break;case 1:e.textContent=te("search.result.one");break;default:e.textContent=te("search.result.other",Zt(t))}}function Ir(e){e.textContent=te("search.result.placeholder")}function zn(e,t){e.appendChild(t)}function qn(e){e.innerHTML=""}function Qn(e,t){e.style.top=`${t}px`}function Kn(e){e.style.top=""}function Bn(e,t){let r=e.firstElementChild;r.style.height=`${t-2*r.offsetTop}px`}function Yn(e){let t=e.firstElementChild;t.style.height=""}function Gn(e,t){e.lastElementChild.appendChild(t)}function Jn(e,t){e.lastElementChild.setAttribute("data-md-state",t)}function Xn(e,t){e.setAttribute("data-md-state",t)}function Pr(e){e.removeAttribute("data-md-state")}function Zn(e,t){e.setAttribute("data-md-state",t)}function $r(e){e.removeAttribute("data-md-state")}function ei(e){return $("button",{class:"md-clipboard md-icon",title:te("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function ti(e,t){return $("aside",{class:"md-annotation",tabIndex:0},$("div",{class:"md-tooltip"},$("div",{class:"md-tooltip__inner md-typeset"},Array.from(t.childNodes))),$("span",{class:"md-annotation__index"},e))}var qe;(function(r){r[r.TEASER=1]="TEASER",r[r.PARENT=2]="PARENT"})(qe||(qe={}));function Wr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(a=>!e.terms[a]).map(a=>[$("del",null,a)," "]).flat().slice(0,-1),i=new URL(e.location);return ce("search.highlight")&&i.searchParams.set("h",Object.entries(e.terms).filter(([,a])=>a).reduce((a,[s])=>`${a} 
${s}`.trim(),"")),$("a",{href:`${i}`,class:"md-search-result__link",tabIndex:-1},$("article",{class:["md-search-result__article",...r?["md-search-result__article--document"]:[]].join(" "),"data-md-score":e.score.toFixed(2)},r>0&&$("div",{class:"md-search-result__icon md-icon"}),$("h1",{class:"md-search-result__title"},e.title),o>0&&e.text.length>0&&$("p",{class:"md-search-result__teaser"},Dn(e.text,320)),e.tags&&e.tags.map(a=>$("span",{class:"md-tag"},a)),o>0&&n.length>0&&$("p",{class:"md-search-result__terms"},te("search.result.term.missing"),": ",n)))}function ri(e){let t=e[0].score,r=[...e],o=r.findIndex(l=>!l.location.includes("#")),[n]=r.splice(o,1),i=r.findIndex(l=>l.scoreWr(l,1)),...s.length?[$("details",{class:"md-search-result__more"},$("summary",{tabIndex:-1},s.length>0&&s.length===1?te("search.result.more.one"):te("search.result.more.other",s.length)),s.map(l=>Wr(l,1)))]:[]];return $("li",{class:"md-search-result__item"},c)}function oi(e){return $("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>$("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?Zt(r):r)))}function ni(e){return $("div",{class:"md-typeset__scrollwrap"},$("div",{class:"md-typeset__table"},e))}function Va(e){let t=ue(),r=new URL(`${e.version}/`,t.base);return $("li",{class:"md-version__item"},$("a",{href:`${r}`,class:"md-version__link"},e.title))}function ii(e,t){return $("div",{class:"md-version"},$("button",{class:"md-version__current"},t.title),$("ul",{class:"md-version__list"},e.map(Va)))}var Ua=0;function Da(e,{viewport$:t}){let r=M(e).pipe(S(i=>{let a=i.closest("[data-tabs]");return a instanceof HTMLElement?V(...I(":scope > input",a).map(s=>y(s,"change"))):J})),o=[],n=e.closest(".annotate.highlighttable")||e.closest(".annotate.highlight");if(n){let i=n.nextElementSibling;if(i instanceof HTMLOListElement){let a=Array.from(i.children);i.remove();for(let s of I(".c, .c1, .cm",e)){let[,c=-1]=s.textContent.match(/\((\d+)\)/)||[],l=a[+c-1];if(typeof 
l!="undefined"){let p=ti(+c,l);s.replaceWith(p),o.push(p)}}}}return t.pipe(D("size"),Ce(r),u(()=>{let i=Se(e),a=vt(e);return R({scroll:a.width>i.width},o.length&&{annotations:o})}),D("scroll"))}function si(e,t){let r=new O;if(r.pipe(ye(gt("(hover)"))).subscribe(([{scroll:o},n])=>{o&&n?Tn(e):On(e)}),r.pipe(oe(1),bt(({annotations:o})=>!!(o==null?void 0:o.length)),u(({annotations:o})=>o.map(n=>ae(".md-tooltip",n))),$e(viewport$.pipe(D("size")))).subscribe(([o,{size:n}])=>{for(let i of o){let{x:a,width:s}=i.getBoundingClientRect();a+s>n.width?i.classList.add("md-tooltip--end"):i.classList.remove("md-tooltip--end")}}),ai.default.isSupported()){let o=e.closest("pre");o.id=`__code_${++Ua}`,o.insertBefore(ei(o.id),e)}return Da(e,t).pipe(_(r),j(()=>r.complete()),u(o=>R({ref:e},o)))}var Vr,Na=0;function ci(e){return Vr||(Vr=vn("https://unpkg.com/mermaid@8.8.4/dist/mermaid.min.js").pipe(_(()=>mermaid.initialize({startOnLoad:!1,themeCSS:za})),ee(1))),Vr.subscribe(()=>{let t=e.innerText;mermaid.mermaidAPI.render(`__mermaid_${Na++}`,t,r=>{e.innerHTML=r})}),Vr.pipe(B({ref:e}))}var za=` + rect.actor { + fill: white; + } + .classLabel .box { + background-color: var(--md-mermaid-label-bg-color); + fill: var(--md-mermaid-label-bg-color); + opacity: 1; + } + .classLabel .label { + font-family: var(--md-mermaid-font-family); + fill: var(--md-mermaid-label-fg-color) + } + .statediagram-cluster.statediagram-cluster .inner { + fill: var(--md-default-bg-color); + } + .statediagram-state rect.divider { + stroke: var(--md-default-fg-color--lighter); + fill: var(--md-default-fg-color--lightest); + } + .cluster rect { + stroke: var(--md-default-fg-color--lighter); + fill: var(--md-default-fg-color--lightest); + } + .edgeLabel, + .edgeLabel rect { + background-color: var(--md-mermaid-label-bg-color); + fill: var(--md-mermaid-label-bg-color); + } + .cardinality text { + fill: inherit !important; + } + .cardinality, + g.classGroup text { + font-family: var(--md-mermaid-font-family); + fill: 
var(--md-mermaid-label-fg-color); + } + .edgeLabel .label rect { + fill: transparent; + } + .nodeLabel, + .label, + .label div .edgeLabel { + font-family: var(--md-mermaid-font-family); + color: var(--md-mermaid-label-fg-color); + } + .label foreignObject { + overflow: visible; + } + .arrowheadPath, + marker { + fill: var(--md-mermaid-edge-color) !important; + } + .edgePath .path, + .flowchart-link, + .relation, + .transition { + stroke: var(--md-mermaid-edge-color); + } + .statediagram-cluster rect, + g.classGroup line, + g.classGroup rect, + .node circle, + .node ellipse, + .node path, + .node polygon, + .node rect { + fill: var(--md-mermaid-node-bg-color); + stroke: var(--md-mermaid-node-fg-color); + } + .node circle.state-end { + fill: var(--md-mermaid-label-bg-color); + stroke: none; + } + .node circle.state-start { + fill: var(--md-mermaid-label-fg-color); + stroke: var(--md-mermaid-label-fg-color); + } +`;function qa(e,{target$:t,print$:r}){return t.pipe(u(o=>o.closest("details:not([open])")),L(o=>e===o),Ce(r),B(e))}function li(e,t){let r=new O;return r.subscribe(()=>{e.setAttribute("open",""),e.scrollIntoView()}),qa(e,t).pipe(_(r),j(()=>r.complete()),B({ref:e}))}var pi=We("table");function ui(e){return ze(e,pi),ze(pi,ni(e)),M({ref:e})}function Qa(e){return M(I(":scope > label",e)).pipe(S(t=>M(...t).pipe(Z(r=>{let o=r.previousElementSibling;return y(o,"change").pipe(B({active:r}))}))))}function fi(e){let t=new O;return t.subscribe(({active:r})=>{if(ce("content.tabs.link")){let o=r.innerText.trim();for(let i of I("[data-tabs]"))for(let a of I(":scope > label",i))if(a.innerText.trim()===o){let s=a.previousElementSibling;s.checked=!0;break}let n=__get("__tabs")||[];__set("__tabs",[...new Set([o,...n])])}}),Qa(e).pipe(_(t),j(()=>t.complete()),u(r=>R({ref:e},r)))}function mi(e,{target$:t,viewport$:r,print$:o}){return V(...I("pre:not([class^=mermaid]) > 
code",e).map(n=>si(n,{viewport$:r})),...I(".mermaid-experimental",e).map(n=>ci(n)),...I("table:not([class])",e).map(n=>ui(n)),...I("details",e).map(n=>li(n,{target$:t,print$:o})),...I("[data-tabs]",e).map(n=>fi(n)))}function Ka(e,{alert$:t}){return t.pipe(S(r=>V(M(!0),M(!1).pipe(ke(2e3))).pipe(u(o=>({message:r,open:o})))))}function di(e,t){let r=new O;return r.pipe(K(G)).subscribe(({message:o,open:n})=>{Hn(e,o),n?jn(e,"open"):Rn(e)}),Ka(e,t).pipe(_(r),j(()=>r.complete()),u(o=>R({ref:e},o)))}function Ba({viewport$:e}){if(!ce("header.autohide"))return M(!1);let t=e.pipe(u(({offset:{y:n}})=>n),xe(2,1),u(([n,i])=>[nMath.abs(i-n.y)>100),u(([,[n]])=>n),q()),o=Yt("search");return z([e,o]).pipe(u(([{offset:n},i])=>n.y>400&&!i),q(),S(n=>n?r:M(!1)),N(!1))}function hi(e,t){return de(()=>{let r=getComputedStyle(e);return M(r.position==="sticky"||r.position==="-webkit-sticky")}).pipe($e(Ve(e),Ba(t)),u(([r,{height:o},n])=>({height:r?o:0,sticky:r,hidden:n})),q((r,o)=>r.sticky===o.sticky&&r.height===o.height&&r.hidden===o.hidden),ee(1))}function bi(e,{header$:t,main$:r}){let o=new O;return o.pipe(D("active"),$e(t),K(G)).subscribe(([{active:n},{hidden:i}])=>{n?Fn(e,i?"hidden":"shadow"):In(e)}),r.subscribe(n=>o.next(n)),t.pipe(u(n=>R({ref:e},n)))}function Ya(e,{viewport$:t,header$:r}){return Xt(e,{header$:r,viewport$:t}).pipe(u(({offset:{y:o}})=>{let{height:n}=Se(e);return{active:o>=n}}),D("active"))}function vi(e,t){let r=new O;r.pipe(K(G)).subscribe(({active:n})=>{n?Pn(e,"active"):$n(e)});let o=pe("article h1");return typeof o=="undefined"?J:Ya(o,t).pipe(_(r),j(()=>r.complete()),u(n=>R({ref:e},n)))}function gi(e,{viewport$:t,header$:r}){let o=r.pipe(u(({height:i})=>i),q()),n=o.pipe(S(()=>Ve(e).pipe(u(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),D("bottom"))));return 
z([o,n,t]).pipe(u(([i,{top:a,bottom:s},{offset:{y:c},size:{height:l}}])=>(l=Math.max(0,l-Math.max(0,a-c,i)-Math.max(0,l+c-s)),{offset:a-i,height:l,active:a-i<=c})),q((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ga(e){let t=__get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=M(...e).pipe(Z(o=>y(o,"change").pipe(B(o))),N(e[Math.max(0,t.index)]),u(o=>({index:e.indexOf(o),color:{scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),ee(1));return r.subscribe(o=>{__set("__palette",o)}),r}function xi(e){let t=new O;t.subscribe(o=>{for(let[n,i]of Object.entries(o.color))typeof i=="string"&&document.body.setAttribute(`data-md-color-${n}`,i);for(let n=0;nt.complete()),u(o=>R({ref:e},o)))}var Ur=ct(Fr());function Ja(e){let t=I(".md-annotation",e);for(let o of t)o.hidden=!0;let r=e.innerText;for(let o of t)o.hidden=!1;return r}function yi({alert$:e}){Ur.default.isSupported()&&new k(t=>{new Ur.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ja(ae(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).subscribe(()=>e.next(te("clipboard.copied")))}function Xa(e){if(e.length<2)return[""];let[t,r]=[...e].sort((n,i)=>n.length-i.length).map(n=>n.replace(/[^/]+$/,"")),o=0;if(t===r)o=t.length;else for(;t.charCodeAt(o)===r.charCodeAt(o);)o++;return e.map(n=>n.replace(t.slice(0,o),""))}function er(e){let t=__get("__sitemap",sessionStorage,e);if(t)return M(t);{let r=ue();return bn(new URL("sitemap.xml",e||r.base)).pipe(u(o=>Xa(I("loc",o).map(n=>n.textContent))),Ae([]),_(o=>__set("__sitemap",o,sessionStorage,e)))}}function Si({document$:e,location$:t,viewport$:r}){let o=ue();if(location.protocol==="file:")return;"scrollRestoration"in 
history&&(history.scrollRestoration="manual",y(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}));let n=pe("link[rel=icon]");typeof n!="undefined"&&(n.href=n.href);let i=er().pipe(u(l=>l.map(p=>`${o.base}/${p}`)),S(l=>y(document.body,"click").pipe(L(p=>!p.metaKey&&!p.ctrlKey),S(p=>{if(p.target instanceof Element){let m=p.target.closest("a");if(m&&!m.target&&l.includes(m.href))return p.preventDefault(),M({url:new URL(m.href)})}return J}))),se()),a=y(window,"popstate").pipe(L(l=>l.state!==null),u(l=>({url:new URL(location.href),offset:l.state})),se());V(i,a).pipe(q((l,p)=>l.url.href===p.url.href),u(({url:l})=>l)).subscribe(t);let s=t.pipe(D("pathname"),S(l=>Jt(l.href).pipe(tt(()=>(Gt(l),J)))),se());i.pipe(rt(s)).subscribe(({url:l})=>{history.pushState({},"",`${l}`)});let c=new DOMParser;s.pipe(S(l=>l.text()),u(l=>c.parseFromString(l,"text/html"))).subscribe(e),V(i,a).pipe(rt(e)).subscribe(({url:l,offset:p})=>{l.hash&&!p?mn(l.hash):jr(p||{y:0})}),e.pipe(Kt(1)).subscribe(l=>{for(let p of["title","link[rel=canonical]","meta[name=author]","meta[name=description]","[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=logo], .md-logo","[data-md-component=skip]"]){let m=pe(p),f=pe(p,l);typeof m!="undefined"&&typeof f!="undefined"&&ze(m,f)}}),e.pipe(Kt(1),u(()=>Oe("container")),S(l=>M(...I("script",l))),Sr(l=>{let p=We("script");if(l.src){for(let m of l.getAttributeNames())p.setAttribute(m,l.getAttribute(m));return ze(l,p),new k(m=>{p.onload=()=>m.complete()})}else return p.textContent=l.textContent,ze(l,p),ge})).subscribe(),r.pipe(Mr(i),wr(250),D("offset")).subscribe(({offset:l})=>{history.replaceState(l,"")}),V(i,a).pipe(xe(2,1),L(([l,p])=>l.url.pathname===p.url.pathname),u(([,l])=>l)).subscribe(({offset:l})=>{jr(l||{y:0})})}var ts=ct(Ei());function Dr(e){let t=new RegExp(e.separator,"img"),r=(o,n,i)=>`${n}${i}`;return o=>{o=o.replace(/[\s*+\-:~^]+/g," ").trim();let n=new 
RegExp(`(^|${e.separator})(${o.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(t,"|")})`,"img");return i=>i.replace(n,r).replace(/<\/mark>(\s+)]*>/img,"$1")}}function Ti(e){return e.split(/"([^"]+)"/g).map((t,r)=>r&1?t.replace(/^\b|^(?![^\x00-\x7F]|$)|\s+/g," +"):t).join("").replace(/"|(?:^|\s+)[*+\-:^~]+(?=\s+|$)/g,"").trim()}var je;(function(n){n[n.SETUP=0]="SETUP",n[n.READY=1]="READY",n[n.QUERY=2]="QUERY",n[n.RESULT=3]="RESULT"})(je||(je={}));function nt(e){return e.type===1}function Oi(e){return e.type===2}function it(e){return e.type===3}function rs({config:e,docs:t,index:r}){e.lang.length===1&&e.lang[0]==="en"&&(e.lang=[te("search.config.lang")]),e.separator==="[\\s\\-]+"&&(e.separator=te("search.config.separator"));let n={pipeline:te("search.config.pipeline").split(/\s*,\s*/).filter(Boolean),suggestions:ce("search.suggest")};return{config:e,docs:t,index:r,options:n}}function _i(e,t){let r=ue(),o=new Worker(e),n=new O,i=En(o,{tx$:n}).pipe(u(a=>{if(it(a))for(let s of a.data.items)for(let c of s)c.location=`${r.base}/${c.location}`;return a}),se());return we(t).pipe(u(a=>({type:je.SETUP,data:rs(a)}))).subscribe(n.next.bind(n)),{tx$:n,rx$:i}}function Mi(){let e=ue(),t=Te(new URL("versions.json",e.base)),r=t.pipe(u(o=>{let[,n]=e.base.match(/([^/]+)\/?$/);return o.find(({version:i,aliases:a})=>i===n||a.includes(n))||o[0]}));z([t,r]).pipe(u(([o,n])=>new Map(o.filter(i=>i!==n).map(i=>[`${new URL(`${i.version}/`,e.base)}`,i]))),S(o=>y(document.body,"click").pipe(L(n=>!n.metaKey&&!n.ctrlKey),S(n=>{if(n.target instanceof Element){let i=n.target.closest("a");if(i&&!i.target&&o.has(i.href))return n.preventDefault(),M(i.href)}return J}),S(n=>{let{version:i}=o.get(n);return er(n).pipe(u(a=>{let c=ve().href.replace(`${e.base}/`,"");return a.includes(c)?new URL(`${i}/${c}`,e.base):new URL(n)}))})))).subscribe(o=>Gt(o)),z([t,r]).subscribe(([o,n])=>{var a;if(ae(".md-header__topic").appendChild(ii(o,n)),__get("__outdated",sessionStorage)===null){let s=((a=e.version)==null?void 
0:a.default)||"latest",c=!n.aliases.includes(s);if(__set("__outdated",c,sessionStorage),c)for(let l of ne("outdated"))l.hidden=!1}})}function os(e,{rx$:t}){let r=(__search==null?void 0:__search.transform)||Ti,o=rn(e),n=V(y(e,"keyup"),y(e,"focus").pipe(ke(1))).pipe(u(()=>r(e.value)),q()),i=ve();return i.searchParams.has("q")&&(Ue("search",!0),t.pipe(L(nt),oe(1)).subscribe(()=>{e.value=i.searchParams.get("q"),He(e)})),z([n,o]).pipe(u(([a,s])=>({value:a,focus:s})))}function Li(e,{tx$:t,rx$:r}){let o=new O;return o.pipe(D("value"),u(({value:n})=>({type:je.QUERY,data:n}))).subscribe(t.next.bind(t)),o.pipe(D("focus")).subscribe(({focus:n})=>{n?(Ue("search",n),Wn(e,"")):Vn(e)}),y(e.form,"reset").pipe(Ar(o.pipe(Tr(1)))).subscribe(()=>He(e)),os(e,{tx$:t,rx$:r}).pipe(_(o),j(()=>o.complete()),u(n=>R({ref:e},n)))}function Ai(e,{rx$:t},{query$:r}){let o=new O,n=sn(e.parentElement).pipe(L(Boolean)),i=ae(":scope > :first-child",e),a=ae(":scope > :last-child",e);return t.pipe(L(nt),oe(1)).subscribe(()=>{Ir(i)}),o.pipe(K(G),ye(r)).subscribe(([{items:c},{value:l}])=>{l?Nn(i,c.length):Ir(i)}),o.pipe(K(G),_(()=>qn(a)),S(({items:c})=>V(M(...c.slice(0,10)),M(...c.slice(10)).pipe(xe(4),Cr(n),S(([l])=>M(...l)))))).subscribe(c=>{zn(a,ri(c))}),t.pipe(L(it),u(({data:c})=>c)).pipe(_(o),j(()=>o.complete()),u(c=>R({ref:e},c)))}function ns(e,{query$:t}){return t.pipe(u(({value:r})=>{let o=ve();return o.searchParams.delete("h"),o.searchParams.set("q",r),{url:o}}))}function ki(e,t){let r=new O;return r.subscribe(({url:o})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${o}`}),y(e,"click").subscribe(o=>o.preventDefault()),ns(e,t).pipe(_(r),j(()=>r.complete()),u(o=>R({ref:e},o)))}function Ci(e,{rx$:t},{keyboard$:r}){let o=new O,n=Oe("search-query"),i=y(n,"keydown").pipe(K(Fe),u(()=>n.value),q());return o.pipe($e(i),u(([{suggestions:s},c])=>{let l=c.split(/([\s-]+)/);if((s==null?void 0:s.length)&&l[l.length-1]){let p=s[s.length-1];p.startsWith(l[l.length-1])&&(l[l.length-1]=p)}else 
l.length=0;return l})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(L(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(L(it),u(({data:s})=>s)).pipe(_(o),j(()=>o.complete()),u(()=>({ref:e})))}function Hi(e,{index$:t,keyboard$:r}){let o=ue(),n=_i(o.search,t),i=Oe("search-query",e),a=Oe("search-result",e),{tx$:s,rx$:c}=n;s.pipe(L(Oi),rt(c.pipe(L(nt),oe(1)))).subscribe(s.next.bind(s)),r.pipe(L(({mode:m})=>m==="search")).subscribe(m=>{let f=Ne();switch(m.type){case"Enter":if(f===i){let d=new Map;for(let v of I(":first-child [href]",a)){let h=v.firstElementChild;d.set(v,parseFloat(h.getAttribute("data-md-score")))}if(d.size){let[[v]]=[...d].sort(([,h],[,U])=>U-h);v.click()}m.claim()}break;case"Escape":case"Tab":Ue("search",!1),He(i,!1);break;case"ArrowUp":case"ArrowDown":if(typeof f=="undefined")He(i);else{let d=[i,...I(":not(details) > [href], summary, details[open] [href]",a)],v=Math.max(0,(Math.max(0,d.indexOf(f))+d.length+(m.type==="ArrowUp"?-1:1))%d.length);He(d[v])}m.claim();break;default:i!==Ne()&&He(i)}}),r.pipe(L(({mode:m})=>m==="global")).subscribe(m=>{switch(m.type){case"f":case"s":case"/":He(i),cn(i),m.claim();break}});let l=Li(i,n),p=Ai(a,n,{query$:l});return V(l,p).pipe(Ce(...ne("search-share",e).map(m=>ki(m,{query$:l})),...ne("search-suggest",e).map(m=>Ci(m,n,{keyboard$:r}))))}function ji(e,{index$:t,location$:r}){return z([t,r.pipe(N(ve()),L(o=>o.searchParams.has("h")))]).pipe(u(([o,n])=>Dr(o.config)(n.searchParams.get("h"))),u(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)==null?void 0:a.offsetHeight){let c=s.textContent,l=o(c);l.length>c.length&&n.set(s,l)}for(let[s,c]of n){let{childNodes:l}=$("span",null,c);s.replaceWith(...Array.from(l))}return{ref:e,nodes:n}}))}function is(e,{viewport$:t,main$:r}){let 
o=e.parentElement.offsetTop-e.parentElement.parentElement.offsetTop;return z([r,t]).pipe(u(([{offset:n,height:i},{offset:{y:a}}])=>(i=i+Math.min(o,Math.max(0,a-n))-o,{height:i,locked:a>=n+o})),q((n,i)=>n.height===i.height&&n.locked===i.locked))}function Nr(e,o){var n=o,{header$:t}=n,r=Yr(n,["header$"]);let i=new O;return i.pipe(K(G),ye(t)).subscribe({next([{height:a},{height:s}]){Bn(e,a),Qn(e,s)},complete(){Kn(e),Yn(e)}}),i.pipe(K(G),oe(1)).subscribe(()=>{for(let a of I(".md-nav__link--active[href]",e)){let s=nn(a);if(typeof s!="undefined"){let c=a.offsetTop-s.offsetTop,{height:l}=Se(s);c-l+a.offsetHeight>0&&s.scrollTo(0,c-l/2)}}}),is(e,r).pipe(_(i),j(()=>i.complete()),u(a=>R({ref:e},a)))}function Ri(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return ht(Te(`${r}/releases/latest`).pipe(u(o=>({version:o.tag_name})),Ae({})),Te(r).pipe(u(o=>({stars:o.stargazers_count,forks:o.forks_count})),Ae({}))).pipe(u(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/repos/${e}`;return Te(r).pipe(u(o=>({repositories:o.public_repos})),Ae({}))}}function Fi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Te(r).pipe(u(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Ae({}))}function Ii(e){let[t]=e.match(/(git(?:hub|lab))/i)||[];switch(t.toLowerCase()){case"github":let[,r,o]=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);return Ri(r,o);case"gitlab":let[,n,i]=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i);return Fi(n,i);default:return J}}var as;function ss(e){return as||(as=de(()=>{let t=__get("__source",sessionStorage);return t?M(t):Ii(e.href).pipe(_(r=>__set("__source",r,sessionStorage)))}).pipe(tt(()=>J),L(t=>Object.keys(t).length>0),u(t=>({facts:t})),ee(1)))}function Pi(e){let t=new O;return t.subscribe(({facts:r})=>{Gn(e,oi(r)),Jn(e,"done")}),ss(e).pipe(_(t),j(()=>t.complete()),u(r=>R({ref:e},r)))}function cs(e,{viewport$:t,header$:r}){return 
Ve(document.body).pipe(S(()=>Xt(e,{header$:r,viewport$:t})),u(({offset:{y:o}})=>({hidden:o>=10})),D("hidden"))}function $i(e,t){let r=new O;return r.pipe(K(G)).subscribe({next({hidden:o}){o?Xn(e,"hidden"):Pr(e)},complete(){Pr(e)}}),(ce("navigation.tabs.sticky")?M({hidden:!1}):cs(e,t)).pipe(_(r),j(()=>r.complete()),u(o=>R({ref:e},o)))}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=I("[href^=\\#]",e);for(let s of n){let c=decodeURIComponent(s.hash.substring(1)),l=pe(`[id="${c}"]`);typeof l!="undefined"&&o.set(s,l)}let i=r.pipe(u(s=>24+s.height));return Ve(document.body).pipe(D("height"),S(s=>de(()=>{let c=[];return M([...o].reduce((l,[p,m])=>{for(;c.length&&o.get(c[c.length-1]).tagName>=m.tagName;)c.pop();let f=m.offsetTop;for(;!f&&m.parentElement;)m=m.parentElement,f=m.offsetTop;return l.set([...c=[...c,p]].reverse(),f)},new Map))}).pipe(u(c=>new Map([...c].sort(([,l],[,p])=>l-p))),S(c=>z([t,i]).pipe(Or(([l,p],[{offset:{y:m},size:f},d])=>{let v=m+f.height>=Math.floor(s.height);for(;p.length;){let[,h]=p[0];if(h-d=m&&!v)p=[l.pop(),...p];else break}return[l,p]},[[],[...c]]),q((l,p)=>l[0]===p[0]&&l[1]===p[1])))))).pipe(u(([s,c])=>({prev:s.map(([l])=>l),next:c.map(([l])=>l)})),N({prev:[],next:[]}),xe(2,1),u(([s,c])=>s.prev.length{for(let[i]of n)Cn(i),An(i);for(let[i,[a]]of o.entries())kn(a,i===o.length-1),Ln(a,"blur");if(ce("navigation.tracking")){let i=ve(),a=o[o.length-1];if(a&&a.length){let[s]=a,{hash:c}=new URL(s.href);i.hash!==c&&(i.hash=c,history.replaceState({},"",`${i}`))}else i.hash="",history.replaceState({},"",`${i}`)}}),ls(e,t).pipe(_(r),j(()=>r.complete()),u(o=>R({ref:e},o)))}function ps(e,{viewport$:t,main$:r}){let o=t.pipe(u(({offset:{y:i}})=>i),xe(2,1),u(([i,a])=>i>a),q()),n=r.pipe(D("active"));return z([n,o]).pipe(u(([{active:i},a])=>({hidden:!(i&&a)})),q((i,a)=>i.hidden===a.hidden))}function Vi(e,t){let r=new O;return 
r.pipe(K(G)).subscribe({next({hidden:o}){o?Zn(e,"hidden"):$r(e)},complete(){$r(e)}}),ps(e,t).pipe(_(r),j(()=>r.complete()),u(o=>R({ref:e},o)))}function Ui({document$:e,tablet$:t}){e.pipe(S(()=>M(...I("[data-md-state=indeterminate]"))),_(r=>{r.indeterminate=!0,r.checked=!1}),Z(r=>y(r,"change").pipe(bt(()=>r.hasAttribute("data-md-state")),B(r))),ye(t)).subscribe(([r,o])=>{r.removeAttribute("data-md-state"),o&&(r.checked=!1)})}function us(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Di({document$:e}){e.pipe(S(()=>M(...I("[data-md-scrollfix]"))),_(t=>t.removeAttribute("data-md-scrollfix")),L(us),Z(t=>y(t,"touchstart").pipe(B(t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Ni({viewport$:e,tablet$:t}){z([Yt("search"),t]).pipe(u(([r,o])=>r&&!o),S(r=>M(r).pipe(ke(r?400:100),K(G))),ye(e)).subscribe(([r,{offset:{y:o}}])=>{r?_n(document.body,o):Mn(document.body)})}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var at=tn(),tr=un(),zr=dn(),qr=pn(),he=wn(),rr=gt("(min-width: 960px)"),zi=gt("(min-width: 1220px)"),qi=hn(),Qi=ue(),Ki=document.forms.namedItem("search")?(__search==null?void 0:__search.index)||Te(`${Qi.base}/search/search_index.json`):J,Qr=new O;yi({alert$:Qr});ce("navigation.instant")&&Si({document$:at,location$:tr,viewport$:he});var Yi;((Yi=Qi.version)==null?void 0:Yi.provider)==="mike"&&Mi();V(tr,zr).pipe(ke(125)).subscribe(()=>{Ue("drawer",!1),Ue("search",!1)});qr.pipe(L(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=pe("[href][rel=prev]");typeof t!="undefined"&&t.click();break;case"n":case".":let r=pe("[href][rel=next]");typeof r!="undefined"&&r.click();break}});Ui({document$:at,tablet$:rr});Di({document$:at});Ni({viewport$:he,tablet$:rr});var 
Qe=hi(Oe("header"),{viewport$:he}),or=at.pipe(u(()=>Oe("main")),S(e=>gi(e,{viewport$:he,header$:Qe})),ee(1)),fs=V(...ne("dialog").map(e=>di(e,{alert$:Qr})),...ne("header").map(e=>bi(e,{viewport$:he,header$:Qe,main$:or})),...ne("palette").map(e=>xi(e)),...ne("search").map(e=>Hi(e,{index$:Ki,keyboard$:qr})),...ne("source").map(e=>Pi(e))),ms=de(()=>V(...ne("content").map(e=>mi(e,{target$:zr,viewport$:he,print$:qi})),...ne("content").map(e=>ce("search.highlight")?ji(e,{index$:Ki,location$:tr}):J),...ne("header-title").map(e=>vi(e,{viewport$:he,header$:Qe})),...ne("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Hr(zi,()=>Nr(e,{viewport$:he,header$:Qe,main$:or})):Hr(rr,()=>Nr(e,{viewport$:he,header$:Qe,main$:or}))),...ne("tabs").map(e=>$i(e,{viewport$:he,header$:Qe})),...ne("toc").map(e=>Wi(e,{viewport$:he,header$:Qe})),...ne("top").map(e=>Vi(e,{viewport$:he,main$:or})))),Bi=at.pipe(S(()=>ms),Ce(fs),ee(1));Bi.subscribe();window.document$=at;window.location$=tr;window.target$=zr;window.keyboard$=qr;window.viewport$=he;window.tablet$=rr;window.screen$=zi;window.print$=qi;window.alert$=Qr;window.component$=Bi;})(); +/*! + * clipboard.js v2.0.8 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */ +/*! ***************************************************************************** +Copyright (c) Microsoft Corporation. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. +***************************************************************************** */ diff --git a/1.3/assets/javascripts/lunr/min/lunr.ar.min.js b/1.3/assets/javascripts/lunr/min/lunr.ar.min.js new file mode 100644 index 00000000..248ddc5d --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.ar.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.ar=function(){this.pipeline.reset(),this.pipeline.add(e.ar.trimmer,e.ar.stopWordFilter,e.ar.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ar.stemmer))},e.ar.wordCharacters="ء-ٛٱـ",e.ar.trimmer=e.trimmerSupport.generateTrimmer(e.ar.wordCharacters),e.Pipeline.registerFunction(e.ar.trimmer,"trimmer-ar"),e.ar.stemmer=function(){var e=this;return e.result=!1,e.preRemoved=!1,e.sufRemoved=!1,e.pre={pre1:"ف ك ب و س ل ن ا ي ت",pre2:"ال لل",pre3:"بال وال فال تال كال ولل",pre4:"فبال كبال وبال وكال"},e.suf={suf1:"ه ك ت ن ا ي",suf2:"نك نه ها وك يا اه ون ين تن تم نا وا ان كم كن ني نن ما هم هن تك ته ات يه",suf3:"تين كهم نيه نهم ونه وها يهم ونا ونك وني وهم تكم تنا تها تني تهم كما كها ناه نكم هنا تان يها",suf4:"كموه ناها ونني ونهم تكما تموه تكاه كماه ناكم ناهم نيها 
وننا"},e.patterns=JSON.parse('{"pt43":[{"pt":[{"c":"ا","l":1}]},{"pt":[{"c":"ا,ت,ن,ي","l":0}],"mPt":[{"c":"ف","l":0,"m":1},{"c":"ع","l":1,"m":2},{"c":"ل","l":2,"m":3}]},{"pt":[{"c":"و","l":2}],"mPt":[{"c":"ف","l":0,"m":0},{"c":"ع","l":1,"m":1},{"c":"ل","l":2,"m":3}]},{"pt":[{"c":"ا","l":2}]},{"pt":[{"c":"ي","l":2}],"mPt":[{"c":"ف","l":0,"m":0},{"c":"ع","l":1,"m":1},{"c":"ا","l":2},{"c":"ل","l":3,"m":3}]},{"pt":[{"c":"م","l":0}]}],"pt53":[{"pt":[{"c":"ت","l":0},{"c":"ا","l":2}]},{"pt":[{"c":"ا,ن,ت,ي","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":0},{"c":"ا","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":3},{"c":"ل","l":3,"m":4},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":0},{"c":"ا","l":3}],"mPt":[{"c":"ف","l":0,"m":1},{"c":"ع","l":1,"m":2},{"c":"ل","l":2,"m":4}]},{"pt":[{"c":"ا","l":3},{"c":"ن","l":4}]},{"pt":[{"c":"ت","l":0},{"c":"ي","l":3}]},{"pt":[{"c":"م","l":0},{"c":"و","l":3}]},{"pt":[{"c":"ا","l":1},{"c":"و","l":3}]},{"pt":[{"c":"و","l":1},{"c":"ا","l":2}]},{"pt":[{"c":"م","l":0},{"c":"ا","l":3}]},{"pt":[{"c":"م","l":0},{"c":"ي","l":3}]},{"pt":[{"c":"ا","l":2},{"c":"ن","l":3}]},{"pt":[{"c":"م","l":0},{"c":"ن","l":1}],"mPt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ف","l":2,"m":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"م","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"م","l":0},{"c":"ا","l":2}]},{"pt":[{"c":"م","l":1},{"c":"ا","l":3}]},{"pt":[{"c":"ي,ت,ا,ن","l":0},{"c":"ت","l":1}],"mPt":[{"c":"ف","l":0,"m":2},{"c":"ع","l":1,"m":3},{"c":"ا","l":2},{"c":"ل","l":3,"m":4}]},{"pt":[{"c":"ت,ي,ا,ن","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":2}
,{"c":"ي","l":3}]},{"pt":[{"c":"ا,ي,ت,ن","l":0},{"c":"ن","l":1}],"mPt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ف","l":2,"m":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":3},{"c":"ء","l":4}]}],"pt63":[{"pt":[{"c":"ا","l":0},{"c":"ت","l":2},{"c":"ا","l":4}]},{"pt":[{"c":"ا,ت,ن,ي","l":0},{"c":"س","l":1},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ف","l":3,"m":3},{"c":"ع","l":4,"m":4},{"c":"ا","l":5},{"c":"ل","l":6,"m":5}]},{"pt":[{"c":"ا,ن,ت,ي","l":0},{"c":"و","l":3}]},{"pt":[{"c":"م","l":0},{"c":"س","l":1},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ف","l":3,"m":3},{"c":"ع","l":4,"m":4},{"c":"ا","l":5},{"c":"ل","l":6,"m":5}]},{"pt":[{"c":"ي","l":1},{"c":"ي","l":3},{"c":"ا","l":4},{"c":"ء","l":5}]},{"pt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ا","l":4}]}],"pt54":[{"pt":[{"c":"ت","l":0}]},{"pt":[{"c":"ا,ي,ت,ن","l":0}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":2},{"c":"ل","l":3,"m":3},{"c":"ر","l":4,"m":4},{"c":"ا","l":5},{"c":"ر","l":6,"m":4}]},{"pt":[{"c":"م","l":0}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":2},{"c":"ل","l":3,"m":3},{"c":"ر","l":4,"m":4},{"c":"ا","l":5},{"c":"ر","l":6,"m":4}]},{"pt":[{"c":"ا","l":2}]},{"pt":[{"c":"ا","l":0},{"c":"ن","l":2}]}],"pt64":[{"pt":[{"c":"ا","l":0},{"c":"ا","l":4}]},{"pt":[{"c":"م","l":0},{"c":"ت","l":1}]}],"pt73":[{"pt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ا","l":5}]}],"pt75":[{"pt":[{"c":"ا","l":0},{"c":"ا","l":5}]}]}'),e.execArray=["cleanWord","removeDiacritics","cleanAlef","removeStopWords","normalizeHamzaAndAlef","removeStartWaw","removePre432","removeEndTaa","wordCheck"],e.stem=function(){var r=0;for(e.result=!1,e.preRemoved=!1,e.sufRemoved=!1;r=0)return!0},e.normalizeHamzaAndAlef=function(){return 
e.word=e.word.replace("ؤ","ء"),e.word=e.word.replace("ئ","ء"),e.word=e.word.replace(/([\u0627])\1+/gi,"ا"),!1},e.removeEndTaa=function(){return!(e.word.length>2)||(e.word=e.word.replace(/[\u0627]$/,""),e.word=e.word.replace("ة",""),!1)},e.removeStartWaw=function(){return e.word.length>3&&"و"==e.word[0]&&"و"==e.word[1]&&(e.word=e.word.slice(1)),!1},e.removePre432=function(){var r=e.word;if(e.word.length>=7){var t=new RegExp("^("+e.pre.pre4.split(" ").join("|")+")");e.word=e.word.replace(t,"")}if(e.word==r&&e.word.length>=6){var c=new RegExp("^("+e.pre.pre3.split(" ").join("|")+")");e.word=e.word.replace(c,"")}if(e.word==r&&e.word.length>=5){var l=new RegExp("^("+e.pre.pre2.split(" ").join("|")+")");e.word=e.word.replace(l,"")}return r!=e.word&&(e.preRemoved=!0),!1},e.patternCheck=function(r){for(var t=0;t3){var t=new RegExp("^("+e.pre.pre1.split(" ").join("|")+")");e.word=e.word.replace(t,"")}return r!=e.word&&(e.preRemoved=!0),!1},e.removeSuf1=function(){var r=e.word;if(0==e.sufRemoved&&e.word.length>3){var t=new RegExp("("+e.suf.suf1.split(" ").join("|")+")$");e.word=e.word.replace(t,"")}return r!=e.word&&(e.sufRemoved=!0),!1},e.removeSuf432=function(){var r=e.word;if(e.word.length>=6){var t=new RegExp("("+e.suf.suf4.split(" ").join("|")+")$");e.word=e.word.replace(t,"")}if(e.word==r&&e.word.length>=5){var c=new RegExp("("+e.suf.suf3.split(" ").join("|")+")$");e.word=e.word.replace(c,"")}if(e.word==r&&e.word.length>=4){var l=new RegExp("("+e.suf.suf2.split(" ").join("|")+")$");e.word=e.word.replace(l,"")}return r!=e.word&&(e.sufRemoved=!0),!1},e.wordCheck=function(){for(var r=(e.word,[e.removeSuf432,e.removeSuf1,e.removePre1]),t=0,c=!1;e.word.length>=7&&!e.result&&t=f.limit)return;f.cursor++}for(;!f.out_grouping(w,97,248);){if(f.cursor>=f.limit)return;f.cursor++}d=f.cursor,d=d&&(r=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,e=f.find_among_b(c,32),f.limit_backward=r,e))switch(f.bra=f.cursor,e){case 1:f.slice_del();break;case 
2:f.in_grouping_b(p,97,229)&&f.slice_del()}}function t(){var e,r=f.limit-f.cursor;f.cursor>=d&&(e=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,f.find_among_b(l,4)?(f.bra=f.cursor,f.limit_backward=e,f.cursor=f.limit-r,f.cursor>f.limit_backward&&(f.cursor--,f.bra=f.cursor,f.slice_del())):f.limit_backward=e)}function s(){var e,r,i,n=f.limit-f.cursor;if(f.ket=f.cursor,f.eq_s_b(2,"st")&&(f.bra=f.cursor,f.eq_s_b(2,"ig")&&f.slice_del()),f.cursor=f.limit-n,f.cursor>=d&&(r=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,e=f.find_among_b(m,5),f.limit_backward=r,e))switch(f.bra=f.cursor,e){case 1:f.slice_del(),i=f.limit-f.cursor,t(),f.cursor=f.limit-i;break;case 2:f.slice_from("løs")}}function o(){var e;f.cursor>=d&&(e=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,f.out_grouping_b(w,97,248)?(f.bra=f.cursor,u=f.slice_to(u),f.limit_backward=e,f.eq_v_b(u)&&f.slice_del()):f.limit_backward=e)}var a,d,u,c=[new r("hed",-1,1),new r("ethed",0,1),new r("ered",-1,1),new r("e",-1,1),new r("erede",3,1),new r("ende",3,1),new r("erende",5,1),new r("ene",3,1),new r("erne",3,1),new r("ere",3,1),new r("en",-1,1),new r("heden",10,1),new r("eren",10,1),new r("er",-1,1),new r("heder",13,1),new r("erer",13,1),new r("s",-1,2),new r("heds",16,1),new r("es",16,1),new r("endes",18,1),new r("erendes",19,1),new r("enes",18,1),new r("ernes",18,1),new r("eres",18,1),new r("ens",16,1),new r("hedens",24,1),new r("erens",24,1),new r("ers",16,1),new r("ets",16,1),new r("erets",28,1),new r("et",-1,1),new r("eret",30,1)],l=[new r("gd",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1)],m=[new r("ig",-1,1),new r("lig",0,1),new r("elig",1,1),new r("els",-1,1),new r("løst",-1,2)],w=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],p=[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16],f=new i;this.setCurrent=function(e){f.setCurrent(e)},this.getCurrent=function(){return f.getCurrent()},this.stem=function(){var r=f.cursor;return 
e(),f.limit_backward=r,f.cursor=f.limit,n(),f.cursor=f.limit,t(),f.cursor=f.limit,s(),f.cursor=f.limit,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.da.stemmer,"stemmer-da"),e.da.stopWordFilter=e.generateStopWordFilter("ad af alle alt anden at blev blive bliver da de dem den denne der deres det dette dig din disse dog du efter eller en end er et for fra ham han hans har havde have hende hendes her hos hun hvad hvis hvor i ikke ind jeg jer jo kunne man mange med meget men mig min mine mit mod ned noget nogle nu når og også om op os over på selv sig sin sine sit skal skulle som sådan thi til ud under var vi vil ville vor være været".split(" ")),e.Pipeline.registerFunction(e.da.stopWordFilter,"stopWordFilter-da")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.de.min.js b/1.3/assets/javascripts/lunr/min/lunr.de.min.js new file mode 100644 index 00000000..f3b5c108 --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.de.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `German` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.de=function(){this.pipeline.reset(),this.pipeline.add(e.de.trimmer,e.de.stopWordFilter,e.de.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.de.stemmer))},e.de.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.de.trimmer=e.trimmerSupport.generateTrimmer(e.de.wordCharacters),e.Pipeline.registerFunction(e.de.trimmer,"trimmer-de"),e.de.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,n){return!(!v.eq_s(1,e)||(v.ket=v.cursor,!v.in_grouping(p,97,252)))&&(v.slice_from(r),v.cursor=n,!0)}function i(){for(var r,n,i,s,t=v.cursor;;)if(r=v.cursor,v.bra=r,v.eq_s(1,"ß"))v.ket=v.cursor,v.slice_from("ss");else{if(r>=v.limit)break;v.cursor=r+1}for(v.cursor=t;;)for(n=v.cursor;;){if(i=v.cursor,v.in_grouping(p,97,252)){if(s=v.cursor,v.bra=s,e("u","U",i))break;if(v.cursor=s,e("y","Y",i))break}if(i>=v.limit)return void(v.cursor=n);v.cursor=i+1}}function s(){for(;!v.in_grouping(p,97,252);){if(v.cursor>=v.limit)return!0;v.cursor++}for(;!v.out_grouping(p,97,252);){if(v.cursor>=v.limit)return!0;v.cursor++}return!1}function t(){m=v.limit,l=m;var e=v.cursor+3;0<=e&&e<=v.limit&&(d=e,s()||(m=v.cursor,m=v.limit)return;v.cursor++}}}function c(){return m<=v.cursor}function u(){return l<=v.cursor}function a(){var e,r,n,i,s=v.limit-v.cursor;if(v.ket=v.cursor,(e=v.find_among_b(w,7))&&(v.bra=v.cursor,c()))switch(e){case 1:v.slice_del();break;case 2:v.slice_del(),v.ket=v.cursor,v.eq_s_b(1,"s")&&(v.bra=v.cursor,v.eq_s_b(3,"nis")&&v.slice_del());break;case 3:v.in_grouping_b(g,98,116)&&v.slice_del()}if(v.cursor=v.limit-s,v.ket=v.cursor,(e=v.find_among_b(f,4))&&(v.bra=v.cursor,c()))switch(e){case 1:v.slice_del();break;case 2:if(v.in_grouping_b(k,98,116)){var 
t=v.cursor-3;v.limit_backward<=t&&t<=v.limit&&(v.cursor=t,v.slice_del())}}if(v.cursor=v.limit-s,v.ket=v.cursor,(e=v.find_among_b(_,8))&&(v.bra=v.cursor,u()))switch(e){case 1:v.slice_del(),v.ket=v.cursor,v.eq_s_b(2,"ig")&&(v.bra=v.cursor,r=v.limit-v.cursor,v.eq_s_b(1,"e")||(v.cursor=v.limit-r,u()&&v.slice_del()));break;case 2:n=v.limit-v.cursor,v.eq_s_b(1,"e")||(v.cursor=v.limit-n,v.slice_del());break;case 3:if(v.slice_del(),v.ket=v.cursor,i=v.limit-v.cursor,!v.eq_s_b(2,"er")&&(v.cursor=v.limit-i,!v.eq_s_b(2,"en")))break;v.bra=v.cursor,c()&&v.slice_del();break;case 4:v.slice_del(),v.ket=v.cursor,e=v.find_among_b(b,2),e&&(v.bra=v.cursor,u()&&1==e&&v.slice_del())}}var d,l,m,h=[new r("",-1,6),new r("U",0,2),new r("Y",0,1),new r("ä",0,3),new r("ö",0,4),new r("ü",0,5)],w=[new r("e",-1,2),new r("em",-1,1),new r("en",-1,2),new r("ern",-1,1),new r("er",-1,1),new r("s",-1,3),new r("es",5,2)],f=[new r("en",-1,1),new r("er",-1,1),new r("st",-1,2),new r("est",2,1)],b=[new r("ig",-1,1),new r("lich",-1,1)],_=[new r("end",-1,1),new r("ig",-1,2),new r("ung",-1,1),new r("lich",-1,3),new r("isch",-1,2),new r("ik",-1,2),new r("heit",-1,3),new r("keit",-1,4)],p=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32,8],g=[117,30,5],k=[117,30,4],v=new n;this.setCurrent=function(e){v.setCurrent(e)},this.getCurrent=function(){return v.getCurrent()},this.stem=function(){var e=v.cursor;return i(),v.cursor=e,t(),v.limit_backward=e,v.cursor=v.limit,a(),v.cursor=v.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.de.stemmer,"stemmer-de"),e.de.stopWordFilter=e.generateStopWordFilter("aber alle allem allen aller alles als also am an ander andere anderem anderen anderer anderes anderm andern anderr anders auch auf aus bei bin bis bist da damit dann das dasselbe dazu daß dein deine deinem deinen deiner deines dem demselben den denn 
denselben der derer derselbe derselben des desselben dessen dich die dies diese dieselbe dieselben diesem diesen dieser dieses dir doch dort du durch ein eine einem einen einer eines einig einige einigem einigen einiger einiges einmal er es etwas euch euer eure eurem euren eurer eures für gegen gewesen hab habe haben hat hatte hatten hier hin hinter ich ihm ihn ihnen ihr ihre ihrem ihren ihrer ihres im in indem ins ist jede jedem jeden jeder jedes jene jenem jenen jener jenes jetzt kann kein keine keinem keinen keiner keines können könnte machen man manche manchem manchen mancher manches mein meine meinem meinen meiner meines mich mir mit muss musste nach nicht nichts noch nun nur ob oder ohne sehr sein seine seinem seinen seiner seines selbst sich sie sind so solche solchem solchen solcher solches soll sollte sondern sonst um und uns unse unsem unsen unser unses unter viel vom von vor war waren warst was weg weil weiter welche welchem welchen welcher welches wenn werde werden wie wieder will wir wird wirst wo wollen wollte während würde würden zu zum zur zwar zwischen über".split(" ")),e.Pipeline.registerFunction(e.de.stopWordFilter,"stopWordFilter-de")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.du.min.js b/1.3/assets/javascripts/lunr/min/lunr.du.min.js new file mode 100644 index 00000000..49a0f3f0 --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.du.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Dutch` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! 
+ * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");console.warn('[Lunr Languages] Please use the "nl" instead of the "du". The "nl" code is the standard code for Dutch language, and "du" will be removed in the next major versions.'),e.du=function(){this.pipeline.reset(),this.pipeline.add(e.du.trimmer,e.du.stopWordFilter,e.du.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.du.stemmer))},e.du.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.du.trimmer=e.trimmerSupport.generateTrimmer(e.du.wordCharacters),e.Pipeline.registerFunction(e.du.trimmer,"trimmer-du"),e.du.stemmer=function(){var r=e.stemmerSupport.Among,i=e.stemmerSupport.SnowballProgram,n=new function(){function e(){for(var e,r,i,o=C.cursor;;){if(C.bra=C.cursor,e=C.find_among(b,11))switch(C.ket=C.cursor,e){case 1:C.slice_from("a");continue;case 2:C.slice_from("e");continue;case 3:C.slice_from("i");continue;case 4:C.slice_from("o");continue;case 5:C.slice_from("u");continue;case 6:if(C.cursor>=C.limit)break;C.cursor++;continue}break}for(C.cursor=o,C.bra=o,C.eq_s(1,"y")?(C.ket=C.cursor,C.slice_from("Y")):C.cursor=o;;)if(r=C.cursor,C.in_grouping(q,97,232)){if(i=C.cursor,C.bra=i,C.eq_s(1,"i"))C.ket=C.cursor,C.in_grouping(q,97,232)&&(C.slice_from("I"),C.cursor=r);else if(C.cursor=i,C.eq_s(1,"y"))C.ket=C.cursor,C.slice_from("Y"),C.cursor=r;else if(n(r))break}else 
if(n(r))break}function n(e){return C.cursor=e,e>=C.limit||(C.cursor++,!1)}function o(){_=C.limit,f=_,t()||(_=C.cursor,_<3&&(_=3),t()||(f=C.cursor))}function t(){for(;!C.in_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}for(;!C.out_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}return!1}function s(){for(var e;;)if(C.bra=C.cursor,e=C.find_among(p,3))switch(C.ket=C.cursor,e){case 1:C.slice_from("y");break;case 2:C.slice_from("i");break;case 3:if(C.cursor>=C.limit)return;C.cursor++}}function u(){return _<=C.cursor}function c(){return f<=C.cursor}function a(){var e=C.limit-C.cursor;C.find_among_b(g,3)&&(C.cursor=C.limit-e,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del()))}function l(){var e;w=!1,C.ket=C.cursor,C.eq_s_b(1,"e")&&(C.bra=C.cursor,u()&&(e=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-e,C.slice_del(),w=!0,a())))}function m(){var e;u()&&(e=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-e,C.eq_s_b(3,"gem")||(C.cursor=C.limit-e,C.slice_del(),a())))}function d(){var e,r,i,n,o,t,s=C.limit-C.cursor;if(C.ket=C.cursor,e=C.find_among_b(h,5))switch(C.bra=C.cursor,e){case 1:u()&&C.slice_from("heid");break;case 2:m();break;case 3:u()&&C.out_grouping_b(z,97,232)&&C.slice_del()}if(C.cursor=C.limit-s,l(),C.cursor=C.limit-s,C.ket=C.cursor,C.eq_s_b(4,"heid")&&(C.bra=C.cursor,c()&&(r=C.limit-C.cursor,C.eq_s_b(1,"c")||(C.cursor=C.limit-r,C.slice_del(),C.ket=C.cursor,C.eq_s_b(2,"en")&&(C.bra=C.cursor,m())))),C.cursor=C.limit-s,C.ket=C.cursor,e=C.find_among_b(k,6))switch(C.bra=C.cursor,e){case 1:if(c()){if(C.slice_del(),i=C.limit-C.cursor,C.ket=C.cursor,C.eq_s_b(2,"ig")&&(C.bra=C.cursor,c()&&(n=C.limit-C.cursor,!C.eq_s_b(1,"e")))){C.cursor=C.limit-n,C.slice_del();break}C.cursor=C.limit-i,a()}break;case 2:c()&&(o=C.limit-C.cursor,C.eq_s_b(1,"e")||(C.cursor=C.limit-o,C.slice_del()));break;case 3:c()&&(C.slice_del(),l());break;case 4:c()&&C.slice_del();break;case 
5:c()&&w&&C.slice_del()}C.cursor=C.limit-s,C.out_grouping_b(j,73,232)&&(t=C.limit-C.cursor,C.find_among_b(v,4)&&C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-t,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del())))}var f,_,w,b=[new r("",-1,6),new r("á",0,1),new r("ä",0,1),new r("é",0,2),new r("ë",0,2),new r("í",0,3),new r("ï",0,3),new r("ó",0,4),new r("ö",0,4),new r("ú",0,5),new r("ü",0,5)],p=[new r("",-1,3),new r("I",0,2),new r("Y",0,1)],g=[new r("dd",-1,-1),new r("kk",-1,-1),new r("tt",-1,-1)],h=[new r("ene",-1,2),new r("se",-1,3),new r("en",-1,2),new r("heden",2,1),new r("s",-1,3)],k=[new r("end",-1,1),new r("ig",-1,2),new r("ing",-1,1),new r("lijk",-1,3),new r("baar",-1,4),new r("bar",-1,5)],v=[new r("aa",-1,-1),new r("ee",-1,-1),new r("oo",-1,-1),new r("uu",-1,-1)],q=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],j=[1,0,0,17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],z=[17,67,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],C=new i;this.setCurrent=function(e){C.setCurrent(e)},this.getCurrent=function(){return C.getCurrent()},this.stem=function(){var r=C.cursor;return e(),C.cursor=r,o(),C.limit_backward=r,C.cursor=C.limit,d(),C.cursor=C.limit_backward,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.du.stemmer,"stemmer-du"),e.du.stopWordFilter=e.generateStopWordFilter(" aan al alles als altijd andere ben bij daar dan dat de der deze die dit doch doen door dus een eens en er ge geen geweest haar had heb hebben heeft hem het hier hij hoe hun iemand iets ik in is ja je kan kon kunnen maar me meer men met mij mijn moet na naar niet niets nog nu of om omdat onder ons ook op over reeds te tegen toch toen tot u uit uw van veel voor want waren was wat werd wezen wie wil worden wordt zal ze zelf zich zij zijn zo zonder zou".split(" ")),e.Pipeline.registerFunction(e.du.stopWordFilter,"stopWordFilter-du")}}); \ 
No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.es.min.js b/1.3/assets/javascripts/lunr/min/lunr.es.min.js new file mode 100644 index 00000000..2989d342 --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.es.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Spanish` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,s){"function"==typeof define&&define.amd?define(s):"object"==typeof exports?module.exports=s():s()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.es=function(){this.pipeline.reset(),this.pipeline.add(e.es.trimmer,e.es.stopWordFilter,e.es.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.es.stemmer))},e.es.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.es.trimmer=e.trimmerSupport.generateTrimmer(e.es.wordCharacters),e.Pipeline.registerFunction(e.es.trimmer,"trimmer-es"),e.es.stemmer=function(){var s=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,n=new function(){function e(){if(A.out_grouping(x,97,252)){for(;!A.in_grouping(x,97,252);){if(A.cursor>=A.limit)return!0;A.cursor++}return!1}return!0}function n(){if(A.in_grouping(x,97,252)){var s=A.cursor;if(e()){if(A.cursor=s,!A.in_grouping(x,97,252))return!0;for(;!A.out_grouping(x,97,252);){if(A.cursor>=A.limit)return!0;A.cursor++}}return!1}return!0}function i(){var 
s,r=A.cursor;if(n()){if(A.cursor=r,!A.out_grouping(x,97,252))return;if(s=A.cursor,e()){if(A.cursor=s,!A.in_grouping(x,97,252)||A.cursor>=A.limit)return;A.cursor++}}g=A.cursor}function a(){for(;!A.in_grouping(x,97,252);){if(A.cursor>=A.limit)return!1;A.cursor++}for(;!A.out_grouping(x,97,252);){if(A.cursor>=A.limit)return!1;A.cursor++}return!0}function t(){var e=A.cursor;g=A.limit,p=g,v=g,i(),A.cursor=e,a()&&(p=A.cursor,a()&&(v=A.cursor))}function o(){for(var e;;){if(A.bra=A.cursor,e=A.find_among(k,6))switch(A.ket=A.cursor,e){case 1:A.slice_from("a");continue;case 2:A.slice_from("e");continue;case 3:A.slice_from("i");continue;case 4:A.slice_from("o");continue;case 5:A.slice_from("u");continue;case 6:if(A.cursor>=A.limit)break;A.cursor++;continue}break}}function u(){return g<=A.cursor}function w(){return p<=A.cursor}function c(){return v<=A.cursor}function m(){var e;if(A.ket=A.cursor,A.find_among_b(y,13)&&(A.bra=A.cursor,(e=A.find_among_b(q,11))&&u()))switch(e){case 1:A.bra=A.cursor,A.slice_from("iendo");break;case 2:A.bra=A.cursor,A.slice_from("ando");break;case 3:A.bra=A.cursor,A.slice_from("ar");break;case 4:A.bra=A.cursor,A.slice_from("er");break;case 5:A.bra=A.cursor,A.slice_from("ir");break;case 6:A.slice_del();break;case 7:A.eq_s_b(1,"u")&&A.slice_del()}}function l(e,s){if(!c())return!0;A.slice_del(),A.ket=A.cursor;var r=A.find_among_b(e,s);return r&&(A.bra=A.cursor,1==r&&c()&&A.slice_del()),!1}function d(e){return!c()||(A.slice_del(),A.ket=A.cursor,A.eq_s_b(2,e)&&(A.bra=A.cursor,c()&&A.slice_del()),!1)}function b(){var e;if(A.ket=A.cursor,e=A.find_among_b(S,46)){switch(A.bra=A.cursor,e){case 1:if(!c())return!1;A.slice_del();break;case 2:if(d("ic"))return!1;break;case 3:if(!c())return!1;A.slice_from("log");break;case 4:if(!c())return!1;A.slice_from("u");break;case 5:if(!c())return!1;A.slice_from("ente");break;case 
6:if(!w())return!1;A.slice_del(),A.ket=A.cursor,e=A.find_among_b(C,4),e&&(A.bra=A.cursor,c()&&(A.slice_del(),1==e&&(A.ket=A.cursor,A.eq_s_b(2,"at")&&(A.bra=A.cursor,c()&&A.slice_del()))));break;case 7:if(l(P,3))return!1;break;case 8:if(l(F,3))return!1;break;case 9:if(d("at"))return!1}return!0}return!1}function f(){var e,s;if(A.cursor>=g&&(s=A.limit_backward,A.limit_backward=g,A.ket=A.cursor,e=A.find_among_b(W,12),A.limit_backward=s,e)){if(A.bra=A.cursor,1==e){if(!A.eq_s_b(1,"u"))return!1;A.slice_del()}return!0}return!1}function _(){var e,s,r,n;if(A.cursor>=g&&(s=A.limit_backward,A.limit_backward=g,A.ket=A.cursor,e=A.find_among_b(L,96),A.limit_backward=s,e))switch(A.bra=A.cursor,e){case 1:r=A.limit-A.cursor,A.eq_s_b(1,"u")?(n=A.limit-A.cursor,A.eq_s_b(1,"g")?A.cursor=A.limit-n:A.cursor=A.limit-r):A.cursor=A.limit-r,A.bra=A.cursor;case 2:A.slice_del()}}function h(){var e,s;if(A.ket=A.cursor,e=A.find_among_b(z,8))switch(A.bra=A.cursor,e){case 1:u()&&A.slice_del();break;case 2:u()&&(A.slice_del(),A.ket=A.cursor,A.eq_s_b(1,"u")&&(A.bra=A.cursor,s=A.limit-A.cursor,A.eq_s_b(1,"g")&&(A.cursor=A.limit-s,u()&&A.slice_del())))}}var v,p,g,k=[new s("",-1,6),new s("á",0,1),new s("é",0,2),new s("í",0,3),new s("ó",0,4),new s("ú",0,5)],y=[new s("la",-1,-1),new s("sela",0,-1),new s("le",-1,-1),new s("me",-1,-1),new s("se",-1,-1),new s("lo",-1,-1),new s("selo",5,-1),new s("las",-1,-1),new s("selas",7,-1),new s("les",-1,-1),new s("los",-1,-1),new s("selos",10,-1),new s("nos",-1,-1)],q=[new s("ando",-1,6),new s("iendo",-1,6),new s("yendo",-1,7),new s("ándo",-1,2),new s("iéndo",-1,1),new s("ar",-1,6),new s("er",-1,6),new s("ir",-1,6),new s("ár",-1,3),new s("ér",-1,4),new s("ír",-1,5)],C=[new s("ic",-1,-1),new s("ad",-1,-1),new s("os",-1,-1),new s("iv",-1,1)],P=[new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,1)],F=[new s("ic",-1,1),new s("abil",-1,1),new s("iv",-1,1)],S=[new s("ica",-1,1),new s("ancia",-1,2),new s("encia",-1,5),new s("adora",-1,2),new s("osa",-1,1),new 
s("ista",-1,1),new s("iva",-1,9),new s("anza",-1,1),new s("logía",-1,3),new s("idad",-1,8),new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,2),new s("mente",-1,7),new s("amente",13,6),new s("ación",-1,2),new s("ución",-1,4),new s("ico",-1,1),new s("ismo",-1,1),new s("oso",-1,1),new s("amiento",-1,1),new s("imiento",-1,1),new s("ivo",-1,9),new s("ador",-1,2),new s("icas",-1,1),new s("ancias",-1,2),new s("encias",-1,5),new s("adoras",-1,2),new s("osas",-1,1),new s("istas",-1,1),new s("ivas",-1,9),new s("anzas",-1,1),new s("logías",-1,3),new s("idades",-1,8),new s("ables",-1,1),new s("ibles",-1,1),new s("aciones",-1,2),new s("uciones",-1,4),new s("adores",-1,2),new s("antes",-1,2),new s("icos",-1,1),new s("ismos",-1,1),new s("osos",-1,1),new s("amientos",-1,1),new s("imientos",-1,1),new s("ivos",-1,9)],W=[new s("ya",-1,1),new s("ye",-1,1),new s("yan",-1,1),new s("yen",-1,1),new s("yeron",-1,1),new s("yendo",-1,1),new s("yo",-1,1),new s("yas",-1,1),new s("yes",-1,1),new s("yais",-1,1),new s("yamos",-1,1),new s("yó",-1,1)],L=[new s("aba",-1,2),new s("ada",-1,2),new s("ida",-1,2),new s("ara",-1,2),new s("iera",-1,2),new s("ía",-1,2),new s("aría",5,2),new s("ería",5,2),new s("iría",5,2),new s("ad",-1,2),new s("ed",-1,2),new s("id",-1,2),new s("ase",-1,2),new s("iese",-1,2),new s("aste",-1,2),new s("iste",-1,2),new s("an",-1,2),new s("aban",16,2),new s("aran",16,2),new s("ieran",16,2),new s("ían",16,2),new s("arían",20,2),new s("erían",20,2),new s("irían",20,2),new s("en",-1,1),new s("asen",24,2),new s("iesen",24,2),new s("aron",-1,2),new s("ieron",-1,2),new s("arán",-1,2),new s("erán",-1,2),new s("irán",-1,2),new s("ado",-1,2),new s("ido",-1,2),new s("ando",-1,2),new s("iendo",-1,2),new s("ar",-1,2),new s("er",-1,2),new s("ir",-1,2),new s("as",-1,2),new s("abas",39,2),new s("adas",39,2),new s("idas",39,2),new s("aras",39,2),new s("ieras",39,2),new s("ías",39,2),new s("arías",45,2),new s("erías",45,2),new s("irías",45,2),new s("es",-1,1),new s("ases",49,2),new 
s("ieses",49,2),new s("abais",-1,2),new s("arais",-1,2),new s("ierais",-1,2),new s("íais",-1,2),new s("aríais",55,2),new s("eríais",55,2),new s("iríais",55,2),new s("aseis",-1,2),new s("ieseis",-1,2),new s("asteis",-1,2),new s("isteis",-1,2),new s("áis",-1,2),new s("éis",-1,1),new s("aréis",64,2),new s("eréis",64,2),new s("iréis",64,2),new s("ados",-1,2),new s("idos",-1,2),new s("amos",-1,2),new s("ábamos",70,2),new s("áramos",70,2),new s("iéramos",70,2),new s("íamos",70,2),new s("aríamos",74,2),new s("eríamos",74,2),new s("iríamos",74,2),new s("emos",-1,1),new s("aremos",78,2),new s("eremos",78,2),new s("iremos",78,2),new s("ásemos",78,2),new s("iésemos",78,2),new s("imos",-1,2),new s("arás",-1,2),new s("erás",-1,2),new s("irás",-1,2),new s("ís",-1,2),new s("ará",-1,2),new s("erá",-1,2),new s("irá",-1,2),new s("aré",-1,2),new s("eré",-1,2),new s("iré",-1,2),new s("ió",-1,2)],z=[new s("a",-1,1),new s("e",-1,2),new s("o",-1,1),new s("os",-1,1),new s("á",-1,1),new s("é",-1,2),new s("í",-1,1),new s("ó",-1,1)],x=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,4,10],A=new r;this.setCurrent=function(e){A.setCurrent(e)},this.getCurrent=function(){return A.getCurrent()},this.stem=function(){var e=A.cursor;return t(),A.limit_backward=e,A.cursor=A.limit,m(),A.cursor=A.limit,b()||(A.cursor=A.limit,f()||(A.cursor=A.limit,_())),A.cursor=A.limit,h(),A.cursor=A.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.es.stemmer,"stemmer-es"),e.es.stopWordFilter=e.generateStopWordFilter("a al algo algunas algunos ante antes como con contra cual cuando de del desde donde durante e el ella ellas ellos en entre era erais eran eras eres es esa esas ese eso esos esta estaba estabais estaban estabas estad estada estadas estado estados estamos estando estar estaremos estará estarán estarás estaré estaréis estaría estaríais 
estaríamos estarían estarías estas este estemos esto estos estoy estuve estuviera estuvierais estuvieran estuvieras estuvieron estuviese estuvieseis estuviesen estuvieses estuvimos estuviste estuvisteis estuviéramos estuviésemos estuvo está estábamos estáis están estás esté estéis estén estés fue fuera fuerais fueran fueras fueron fuese fueseis fuesen fueses fui fuimos fuiste fuisteis fuéramos fuésemos ha habida habidas habido habidos habiendo habremos habrá habrán habrás habré habréis habría habríais habríamos habrían habrías habéis había habíais habíamos habían habías han has hasta hay haya hayamos hayan hayas hayáis he hemos hube hubiera hubierais hubieran hubieras hubieron hubiese hubieseis hubiesen hubieses hubimos hubiste hubisteis hubiéramos hubiésemos hubo la las le les lo los me mi mis mucho muchos muy más mí mía mías mío míos nada ni no nos nosotras nosotros nuestra nuestras nuestro nuestros o os otra otras otro otros para pero poco por porque que quien quienes qué se sea seamos sean seas seremos será serán serás seré seréis sería seríais seríamos serían serías seáis sido siendo sin sobre sois somos son soy su sus suya suyas suyo suyos sí también tanto te tendremos tendrá tendrán tendrás tendré tendréis tendría tendríais tendríamos tendrían tendrías tened tenemos tenga tengamos tengan tengas tengo tengáis tenida tenidas tenido tenidos teniendo tenéis tenía teníais teníamos tenían tenías ti tiene tienen tienes todo todos tu tus tuve tuviera tuvierais tuvieran tuvieras tuvieron tuviese tuvieseis tuviesen tuvieses tuvimos tuviste tuvisteis tuviéramos tuviésemos tuvo tuya tuyas tuyo tuyos tú un una uno unos vosotras vosotros vuestra vuestras vuestro vuestros y ya yo él éramos".split(" ")),e.Pipeline.registerFunction(e.es.stopWordFilter,"stopWordFilter-es")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.fi.min.js b/1.3/assets/javascripts/lunr/min/lunr.fi.min.js new file mode 100644 index 00000000..29f5dfce --- /dev/null +++ 
b/1.3/assets/javascripts/lunr/min/lunr.fi.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Finnish` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(i,e){"function"==typeof define&&define.amd?define(e):"object"==typeof exports?module.exports=e():e()(i.lunr)}(this,function(){return function(i){if(void 0===i)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===i.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");i.fi=function(){this.pipeline.reset(),this.pipeline.add(i.fi.trimmer,i.fi.stopWordFilter,i.fi.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(i.fi.stemmer))},i.fi.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",i.fi.trimmer=i.trimmerSupport.generateTrimmer(i.fi.wordCharacters),i.Pipeline.registerFunction(i.fi.trimmer,"trimmer-fi"),i.fi.stemmer=function(){var e=i.stemmerSupport.Among,r=i.stemmerSupport.SnowballProgram,n=new function(){function i(){f=A.limit,d=f,n()||(f=A.cursor,n()||(d=A.cursor))}function n(){for(var i;;){if(i=A.cursor,A.in_grouping(W,97,246))break;if(A.cursor=i,i>=A.limit)return!0;A.cursor++}for(A.cursor=i;!A.out_grouping(W,97,246);){if(A.cursor>=A.limit)return!0;A.cursor++}return!1}function t(){return d<=A.cursor}function s(){var i,e;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(h,10)){switch(A.bra=A.cursor,A.limit_backward=e,i){case 1:if(!A.in_grouping_b(x,97,246))return;break;case 2:if(!t())return}A.slice_del()}else A.limit_backward=e}function o(){var 
i,e,r;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(v,9))switch(A.bra=A.cursor,A.limit_backward=e,i){case 1:r=A.limit-A.cursor,A.eq_s_b(1,"k")||(A.cursor=A.limit-r,A.slice_del());break;case 2:A.slice_del(),A.ket=A.cursor,A.eq_s_b(3,"kse")&&(A.bra=A.cursor,A.slice_from("ksi"));break;case 3:A.slice_del();break;case 4:A.find_among_b(p,6)&&A.slice_del();break;case 5:A.find_among_b(g,6)&&A.slice_del();break;case 6:A.find_among_b(j,2)&&A.slice_del()}else A.limit_backward=e}function l(){return A.find_among_b(q,7)}function a(){return A.eq_s_b(1,"i")&&A.in_grouping_b(L,97,246)}function u(){var i,e,r;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(C,30)){switch(A.bra=A.cursor,A.limit_backward=e,i){case 1:if(!A.eq_s_b(1,"a"))return;break;case 2:case 9:if(!A.eq_s_b(1,"e"))return;break;case 3:if(!A.eq_s_b(1,"i"))return;break;case 4:if(!A.eq_s_b(1,"o"))return;break;case 5:if(!A.eq_s_b(1,"ä"))return;break;case 6:if(!A.eq_s_b(1,"ö"))return;break;case 7:if(r=A.limit-A.cursor,!l()&&(A.cursor=A.limit-r,!A.eq_s_b(2,"ie"))){A.cursor=A.limit-r;break}if(A.cursor=A.limit-r,A.cursor<=A.limit_backward){A.cursor=A.limit-r;break}A.cursor--,A.bra=A.cursor;break;case 8:if(!A.in_grouping_b(W,97,246)||!A.out_grouping_b(W,97,246))return}A.slice_del(),k=!0}else A.limit_backward=e}function c(){var i,e,r;if(A.cursor>=d)if(e=A.limit_backward,A.limit_backward=d,A.ket=A.cursor,i=A.find_among_b(P,14)){if(A.bra=A.cursor,A.limit_backward=e,1==i){if(r=A.limit-A.cursor,A.eq_s_b(2,"po"))return;A.cursor=A.limit-r}A.slice_del()}else A.limit_backward=e}function m(){var i;A.cursor>=f&&(i=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,A.find_among_b(F,2)?(A.bra=A.cursor,A.limit_backward=i,A.slice_del()):A.limit_backward=i)}function w(){var 
i,e,r,n,t,s;if(A.cursor>=f){if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,A.eq_s_b(1,"t")&&(A.bra=A.cursor,r=A.limit-A.cursor,A.in_grouping_b(W,97,246)&&(A.cursor=A.limit-r,A.slice_del(),A.limit_backward=e,n=A.limit-A.cursor,A.cursor>=d&&(A.cursor=d,t=A.limit_backward,A.limit_backward=A.cursor,A.cursor=A.limit-n,A.ket=A.cursor,i=A.find_among_b(S,2))))){if(A.bra=A.cursor,A.limit_backward=t,1==i){if(s=A.limit-A.cursor,A.eq_s_b(2,"po"))return;A.cursor=A.limit-s}return void A.slice_del()}A.limit_backward=e}}function _(){var i,e,r,n;if(A.cursor>=f){for(i=A.limit_backward,A.limit_backward=f,e=A.limit-A.cursor,l()&&(A.cursor=A.limit-e,A.ket=A.cursor,A.cursor>A.limit_backward&&(A.cursor--,A.bra=A.cursor,A.slice_del())),A.cursor=A.limit-e,A.ket=A.cursor,A.in_grouping_b(y,97,228)&&(A.bra=A.cursor,A.out_grouping_b(W,97,246)&&A.slice_del()),A.cursor=A.limit-e,A.ket=A.cursor,A.eq_s_b(1,"j")&&(A.bra=A.cursor,r=A.limit-A.cursor,A.eq_s_b(1,"o")?A.slice_del():(A.cursor=A.limit-r,A.eq_s_b(1,"u")&&A.slice_del())),A.cursor=A.limit-e,A.ket=A.cursor,A.eq_s_b(1,"o")&&(A.bra=A.cursor,A.eq_s_b(1,"j")&&A.slice_del()),A.cursor=A.limit-e,A.limit_backward=i;;){if(n=A.limit-A.cursor,A.out_grouping_b(W,97,246)){A.cursor=A.limit-n;break}if(A.cursor=A.limit-n,A.cursor<=A.limit_backward)return;A.cursor--}A.ket=A.cursor,A.cursor>A.limit_backward&&(A.cursor--,A.bra=A.cursor,b=A.slice_to(),A.eq_v_b(b)&&A.slice_del())}}var k,b,d,f,h=[new e("pa",-1,1),new e("sti",-1,2),new e("kaan",-1,1),new e("han",-1,1),new e("kin",-1,1),new e("hän",-1,1),new e("kään",-1,1),new e("ko",-1,1),new e("pä",-1,1),new e("kö",-1,1)],p=[new e("lla",-1,-1),new e("na",-1,-1),new e("ssa",-1,-1),new e("ta",-1,-1),new e("lta",3,-1),new e("sta",3,-1)],g=[new e("llä",-1,-1),new e("nä",-1,-1),new e("ssä",-1,-1),new e("tä",-1,-1),new e("ltä",3,-1),new e("stä",3,-1)],j=[new e("lle",-1,-1),new e("ine",-1,-1)],v=[new e("nsa",-1,3),new e("mme",-1,3),new e("nne",-1,3),new e("ni",-1,2),new e("si",-1,1),new e("an",-1,4),new 
e("en",-1,6),new e("än",-1,5),new e("nsä",-1,3)],q=[new e("aa",-1,-1),new e("ee",-1,-1),new e("ii",-1,-1),new e("oo",-1,-1),new e("uu",-1,-1),new e("ää",-1,-1),new e("öö",-1,-1)],C=[new e("a",-1,8),new e("lla",0,-1),new e("na",0,-1),new e("ssa",0,-1),new e("ta",0,-1),new e("lta",4,-1),new e("sta",4,-1),new e("tta",4,9),new e("lle",-1,-1),new e("ine",-1,-1),new e("ksi",-1,-1),new e("n",-1,7),new e("han",11,1),new e("den",11,-1,a),new e("seen",11,-1,l),new e("hen",11,2),new e("tten",11,-1,a),new e("hin",11,3),new e("siin",11,-1,a),new e("hon",11,4),new e("hän",11,5),new e("hön",11,6),new e("ä",-1,8),new e("llä",22,-1),new e("nä",22,-1),new e("ssä",22,-1),new e("tä",22,-1),new e("ltä",26,-1),new e("stä",26,-1),new e("ttä",26,9)],P=[new e("eja",-1,-1),new e("mma",-1,1),new e("imma",1,-1),new e("mpa",-1,1),new e("impa",3,-1),new e("mmi",-1,1),new e("immi",5,-1),new e("mpi",-1,1),new e("impi",7,-1),new e("ejä",-1,-1),new e("mmä",-1,1),new e("immä",10,-1),new e("mpä",-1,1),new e("impä",12,-1)],F=[new e("i",-1,-1),new e("j",-1,-1)],S=[new e("mma",-1,1),new e("imma",0,-1)],y=[17,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8],W=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],L=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],x=[17,97,24,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],A=new r;this.setCurrent=function(i){A.setCurrent(i)},this.getCurrent=function(){return A.getCurrent()},this.stem=function(){var e=A.cursor;return i(),k=!1,A.limit_backward=e,A.cursor=A.limit,s(),A.cursor=A.limit,o(),A.cursor=A.limit,u(),A.cursor=A.limit,c(),A.cursor=A.limit,k?(m(),A.cursor=A.limit):(A.cursor=A.limit,w(),A.cursor=A.limit),_(),!0}};return function(i){return"function"==typeof i.update?i.update(function(i){return n.setCurrent(i),n.stem(),n.getCurrent()}):(n.setCurrent(i),n.stem(),n.getCurrent())}}(),i.Pipeline.registerFunction(i.fi.stemmer,"stemmer-fi"),i.fi.stopWordFilter=i.generateStopWordFilter("ei eivät emme en et ette että he heidän heidät heihin heille heillä heiltä heissä heistä heitä hän häneen hänelle hänellä 
häneltä hänen hänessä hänestä hänet häntä itse ja johon joiden joihin joiksi joilla joille joilta joina joissa joista joita joka joksi jolla jolle jolta jona jonka jos jossa josta jota jotka kanssa keiden keihin keiksi keille keillä keiltä keinä keissä keistä keitä keneen keneksi kenelle kenellä keneltä kenen kenenä kenessä kenestä kenet ketkä ketkä ketä koska kuin kuka kun me meidän meidät meihin meille meillä meiltä meissä meistä meitä mihin miksi mikä mille millä miltä minkä minkä minua minulla minulle minulta minun minussa minusta minut minuun minä minä missä mistä mitkä mitä mukaan mutta ne niiden niihin niiksi niille niillä niiltä niin niin niinä niissä niistä niitä noiden noihin noiksi noilla noille noilta noin noina noissa noista noita nuo nyt näiden näihin näiksi näille näillä näiltä näinä näissä näistä näitä nämä ole olemme olen olet olette oli olimme olin olisi olisimme olisin olisit olisitte olisivat olit olitte olivat olla olleet ollut on ovat poikki se sekä sen siihen siinä siitä siksi sille sillä sillä siltä sinua sinulla sinulle sinulta sinun sinussa sinusta sinut sinuun sinä sinä sitä tai te teidän teidät teihin teille teillä teiltä teissä teistä teitä tuo tuohon tuoksi tuolla tuolle tuolta tuon tuona tuossa tuosta tuota tähän täksi tälle tällä tältä tämä tämän tänä tässä tästä tätä vaan vai vaikka yli".split(" ")),i.Pipeline.registerFunction(i.fi.stopWordFilter,"stopWordFilter-fi")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.fr.min.js b/1.3/assets/javascripts/lunr/min/lunr.fr.min.js new file mode 100644 index 00000000..68cd0094 --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.fr.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `French` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! 
+ * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.fr=function(){this.pipeline.reset(),this.pipeline.add(e.fr.trimmer,e.fr.stopWordFilter,e.fr.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.fr.stemmer))},e.fr.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.fr.trimmer=e.trimmerSupport.generateTrimmer(e.fr.wordCharacters),e.Pipeline.registerFunction(e.fr.trimmer,"trimmer-fr"),e.fr.stemmer=function(){var r=e.stemmerSupport.Among,s=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,s){return!(!W.eq_s(1,e)||(W.ket=W.cursor,!W.in_grouping(F,97,251)))&&(W.slice_from(r),W.cursor=s,!0)}function i(e,r,s){return!!W.eq_s(1,e)&&(W.ket=W.cursor,W.slice_from(r),W.cursor=s,!0)}function n(){for(var r,s;;){if(r=W.cursor,W.in_grouping(F,97,251)){if(W.bra=W.cursor,s=W.cursor,e("u","U",r))continue;if(W.cursor=s,e("i","I",r))continue;if(W.cursor=s,i("y","Y",r))continue}if(W.cursor=r,W.bra=r,!e("y","Y",r)){if(W.cursor=r,W.eq_s(1,"q")&&(W.bra=W.cursor,i("u","U",r)))continue;if(W.cursor=r,r>=W.limit)return;W.cursor++}}}function t(){for(;!W.in_grouping(F,97,251);){if(W.cursor>=W.limit)return!0;W.cursor++}for(;!W.out_grouping(F,97,251);){if(W.cursor>=W.limit)return!0;W.cursor++}return!1}function u(){var 
e=W.cursor;if(q=W.limit,g=q,p=q,W.in_grouping(F,97,251)&&W.in_grouping(F,97,251)&&W.cursor=W.limit){W.cursor=q;break}W.cursor++}while(!W.in_grouping(F,97,251))}q=W.cursor,W.cursor=e,t()||(g=W.cursor,t()||(p=W.cursor))}function o(){for(var e,r;;){if(r=W.cursor,W.bra=r,!(e=W.find_among(h,4)))break;switch(W.ket=W.cursor,e){case 1:W.slice_from("i");break;case 2:W.slice_from("u");break;case 3:W.slice_from("y");break;case 4:if(W.cursor>=W.limit)return;W.cursor++}}}function c(){return q<=W.cursor}function a(){return g<=W.cursor}function l(){return p<=W.cursor}function w(){var e,r;if(W.ket=W.cursor,e=W.find_among_b(C,43)){switch(W.bra=W.cursor,e){case 1:if(!l())return!1;W.slice_del();break;case 2:if(!l())return!1;W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"ic")&&(W.bra=W.cursor,l()?W.slice_del():W.slice_from("iqU"));break;case 3:if(!l())return!1;W.slice_from("log");break;case 4:if(!l())return!1;W.slice_from("u");break;case 5:if(!l())return!1;W.slice_from("ent");break;case 6:if(!c())return!1;if(W.slice_del(),W.ket=W.cursor,e=W.find_among_b(z,6))switch(W.bra=W.cursor,e){case 1:l()&&(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"at")&&(W.bra=W.cursor,l()&&W.slice_del()));break;case 2:l()?W.slice_del():a()&&W.slice_from("eux");break;case 3:l()&&W.slice_del();break;case 4:c()&&W.slice_from("i")}break;case 7:if(!l())return!1;if(W.slice_del(),W.ket=W.cursor,e=W.find_among_b(y,3))switch(W.bra=W.cursor,e){case 1:l()?W.slice_del():W.slice_from("abl");break;case 2:l()?W.slice_del():W.slice_from("iqU");break;case 3:l()&&W.slice_del()}break;case 8:if(!l())return!1;if(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"at")&&(W.bra=W.cursor,l()&&(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"ic")))){W.bra=W.cursor,l()?W.slice_del():W.slice_from("iqU");break}break;case 9:W.slice_from("eau");break;case 10:if(!a())return!1;W.slice_from("al");break;case 11:if(l())W.slice_del();else{if(!a())return!1;W.slice_from("eux")}break;case 12:if(!a()||!W.out_grouping_b(F,97,251))return!1;W.slice_del();break;case 13:return 
c()&&W.slice_from("ant"),!1;case 14:return c()&&W.slice_from("ent"),!1;case 15:return r=W.limit-W.cursor,W.in_grouping_b(F,97,251)&&c()&&(W.cursor=W.limit-r,W.slice_del()),!1}return!0}return!1}function f(){var e,r;if(W.cursor=q){if(s=W.limit_backward,W.limit_backward=q,W.ket=W.cursor,e=W.find_among_b(P,7))switch(W.bra=W.cursor,e){case 1:if(l()){if(i=W.limit-W.cursor,!W.eq_s_b(1,"s")&&(W.cursor=W.limit-i,!W.eq_s_b(1,"t")))break;W.slice_del()}break;case 2:W.slice_from("i");break;case 3:W.slice_del();break;case 4:W.eq_s_b(2,"gu")&&W.slice_del()}W.limit_backward=s}}function b(){var e=W.limit-W.cursor;W.find_among_b(U,5)&&(W.cursor=W.limit-e,W.ket=W.cursor,W.cursor>W.limit_backward&&(W.cursor--,W.bra=W.cursor,W.slice_del()))}function d(){for(var e,r=1;W.out_grouping_b(F,97,251);)r--;if(r<=0){if(W.ket=W.cursor,e=W.limit-W.cursor,!W.eq_s_b(1,"é")&&(W.cursor=W.limit-e,!W.eq_s_b(1,"è")))return;W.bra=W.cursor,W.slice_from("e")}}function k(){if(!w()&&(W.cursor=W.limit,!f()&&(W.cursor=W.limit,!m())))return W.cursor=W.limit,void _();W.cursor=W.limit,W.ket=W.cursor,W.eq_s_b(1,"Y")?(W.bra=W.cursor,W.slice_from("i")):(W.cursor=W.limit,W.eq_s_b(1,"ç")&&(W.bra=W.cursor,W.slice_from("c")))}var p,g,q,v=[new r("col",-1,-1),new r("par",-1,-1),new r("tap",-1,-1)],h=[new r("",-1,4),new r("I",0,1),new r("U",0,2),new r("Y",0,3)],z=[new r("iqU",-1,3),new r("abl",-1,3),new r("Ièr",-1,4),new r("ièr",-1,4),new r("eus",-1,2),new r("iv",-1,1)],y=[new r("ic",-1,2),new r("abil",-1,1),new r("iv",-1,3)],C=[new r("iqUe",-1,1),new r("atrice",-1,2),new r("ance",-1,1),new r("ence",-1,5),new r("logie",-1,3),new r("able",-1,1),new r("isme",-1,1),new r("euse",-1,11),new r("iste",-1,1),new r("ive",-1,8),new r("if",-1,8),new r("usion",-1,4),new r("ation",-1,2),new r("ution",-1,4),new r("ateur",-1,2),new r("iqUes",-1,1),new r("atrices",-1,2),new r("ances",-1,1),new r("ences",-1,5),new r("logies",-1,3),new r("ables",-1,1),new r("ismes",-1,1),new r("euses",-1,11),new r("istes",-1,1),new r("ives",-1,8),new 
r("ifs",-1,8),new r("usions",-1,4),new r("ations",-1,2),new r("utions",-1,4),new r("ateurs",-1,2),new r("ments",-1,15),new r("ements",30,6),new r("issements",31,12),new r("ités",-1,7),new r("ment",-1,15),new r("ement",34,6),new r("issement",35,12),new r("amment",34,13),new r("emment",34,14),new r("aux",-1,10),new r("eaux",39,9),new r("eux",-1,1),new r("ité",-1,7)],x=[new r("ira",-1,1),new r("ie",-1,1),new r("isse",-1,1),new r("issante",-1,1),new r("i",-1,1),new r("irai",4,1),new r("ir",-1,1),new r("iras",-1,1),new r("ies",-1,1),new r("îmes",-1,1),new r("isses",-1,1),new r("issantes",-1,1),new r("îtes",-1,1),new r("is",-1,1),new r("irais",13,1),new r("issais",13,1),new r("irions",-1,1),new r("issions",-1,1),new r("irons",-1,1),new r("issons",-1,1),new r("issants",-1,1),new r("it",-1,1),new r("irait",21,1),new r("issait",21,1),new r("issant",-1,1),new r("iraIent",-1,1),new r("issaIent",-1,1),new r("irent",-1,1),new r("issent",-1,1),new r("iront",-1,1),new r("ît",-1,1),new r("iriez",-1,1),new r("issiez",-1,1),new r("irez",-1,1),new r("issez",-1,1)],I=[new r("a",-1,3),new r("era",0,2),new r("asse",-1,3),new r("ante",-1,3),new r("ée",-1,2),new r("ai",-1,3),new r("erai",5,2),new r("er",-1,2),new r("as",-1,3),new r("eras",8,2),new r("âmes",-1,3),new r("asses",-1,3),new r("antes",-1,3),new r("âtes",-1,3),new r("ées",-1,2),new r("ais",-1,3),new r("erais",15,2),new r("ions",-1,1),new r("erions",17,2),new r("assions",17,3),new r("erons",-1,2),new r("ants",-1,3),new r("és",-1,2),new r("ait",-1,3),new r("erait",23,2),new r("ant",-1,3),new r("aIent",-1,3),new r("eraIent",26,2),new r("èrent",-1,2),new r("assent",-1,3),new r("eront",-1,2),new r("ât",-1,3),new r("ez",-1,2),new r("iez",32,2),new r("eriez",33,2),new r("assiez",33,3),new r("erez",32,2),new r("é",-1,2)],P=[new r("e",-1,3),new r("Ière",0,2),new r("ière",0,2),new r("ion",-1,1),new r("Ier",-1,2),new r("ier",-1,2),new r("ë",-1,4)],U=[new r("ell",-1,-1),new r("eill",-1,-1),new r("enn",-1,-1),new r("onn",-1,-1),new 
r("ett",-1,-1)],F=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,128,130,103,8,5],S=[1,65,20,0,0,0,0,0,0,0,0,0,0,0,0,0,128],W=new s;this.setCurrent=function(e){W.setCurrent(e)},this.getCurrent=function(){return W.getCurrent()},this.stem=function(){var e=W.cursor;return n(),W.cursor=e,u(),W.limit_backward=e,W.cursor=W.limit,k(),W.cursor=W.limit,b(),W.cursor=W.limit,d(),W.cursor=W.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.fr.stemmer,"stemmer-fr"),e.fr.stopWordFilter=e.generateStopWordFilter("ai aie aient aies ait as au aura aurai auraient aurais aurait auras aurez auriez aurions aurons auront aux avaient avais avait avec avez aviez avions avons ayant ayez ayons c ce ceci celà ces cet cette d dans de des du elle en es est et eu eue eues eurent eus eusse eussent eusses eussiez eussions eut eux eûmes eût eûtes furent fus fusse fussent fusses fussiez fussions fut fûmes fût fûtes ici il ils j je l la le les leur leurs lui m ma mais me mes moi mon même n ne nos notre nous on ont ou par pas pour qu que quel quelle quelles quels qui s sa sans se sera serai seraient serais serait seras serez seriez serions serons seront ses soi soient sois soit sommes son sont soyez soyons suis sur t ta te tes toi ton tu un une vos votre vous y à étaient étais était étant étiez étions été étée étées étés êtes".split(" ")),e.Pipeline.registerFunction(e.fr.stopWordFilter,"stopWordFilter-fr")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.hu.min.js b/1.3/assets/javascripts/lunr/min/lunr.hu.min.js new file mode 100644 index 00000000..ed9d909f --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.hu.min.js @@ -0,0 +1,18 @@ +/*! 
+ * Lunr languages, `Hungarian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,n){"function"==typeof define&&define.amd?define(n):"object"==typeof exports?module.exports=n():n()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.hu=function(){this.pipeline.reset(),this.pipeline.add(e.hu.trimmer,e.hu.stopWordFilter,e.hu.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.hu.stemmer))},e.hu.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.hu.trimmer=e.trimmerSupport.generateTrimmer(e.hu.wordCharacters),e.Pipeline.registerFunction(e.hu.trimmer,"trimmer-hu"),e.hu.stemmer=function(){var n=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,i=new function(){function e(){var e,n=L.cursor;if(d=L.limit,L.in_grouping(W,97,252))for(;;){if(e=L.cursor,L.out_grouping(W,97,252))return L.cursor=e,L.find_among(g,8)||(L.cursor=e,e=L.limit)return void(d=e);L.cursor++}if(L.cursor=n,L.out_grouping(W,97,252)){for(;!L.in_grouping(W,97,252);){if(L.cursor>=L.limit)return;L.cursor++}d=L.cursor}}function i(){return d<=L.cursor}function a(){var e;if(L.ket=L.cursor,(e=L.find_among_b(h,2))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("a");break;case 2:L.slice_from("e")}}function t(){var e=L.limit-L.cursor;return!!L.find_among_b(p,23)&&(L.cursor=L.limit-e,!0)}function s(){if(L.cursor>L.limit_backward){L.cursor--,L.ket=L.cursor;var 
e=L.cursor-1;L.limit_backward<=e&&e<=L.limit&&(L.cursor=e,L.bra=e,L.slice_del())}}function c(){var e;if(L.ket=L.cursor,(e=L.find_among_b(_,2))&&(L.bra=L.cursor,i())){if((1==e||2==e)&&!t())return;L.slice_del(),s()}}function o(){L.ket=L.cursor,L.find_among_b(v,44)&&(L.bra=L.cursor,i()&&(L.slice_del(),a()))}function w(){var e;if(L.ket=L.cursor,(e=L.find_among_b(z,3))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("e");break;case 2:case 3:L.slice_from("a")}}function l(){var e;if(L.ket=L.cursor,(e=L.find_among_b(y,6))&&(L.bra=L.cursor,i()))switch(e){case 1:case 2:L.slice_del();break;case 3:L.slice_from("a");break;case 4:L.slice_from("e")}}function u(){var e;if(L.ket=L.cursor,(e=L.find_among_b(j,2))&&(L.bra=L.cursor,i())){if((1==e||2==e)&&!t())return;L.slice_del(),s()}}function m(){var e;if(L.ket=L.cursor,(e=L.find_among_b(C,7))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("a");break;case 2:L.slice_from("e");break;case 3:case 4:case 5:case 6:case 7:L.slice_del()}}function k(){var e;if(L.ket=L.cursor,(e=L.find_among_b(P,12))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 7:case 9:L.slice_del();break;case 2:case 5:case 8:L.slice_from("e");break;case 3:case 6:L.slice_from("a")}}function f(){var e;if(L.ket=L.cursor,(e=L.find_among_b(F,31))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 7:case 8:case 9:case 12:case 13:case 16:case 17:case 18:L.slice_del();break;case 2:case 5:case 10:case 14:case 19:L.slice_from("a");break;case 3:case 6:case 11:case 15:case 20:L.slice_from("e")}}function b(){var e;if(L.ket=L.cursor,(e=L.find_among_b(S,42))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 5:case 6:case 9:case 10:case 11:case 14:case 15:case 16:case 17:case 20:case 21:case 24:case 25:case 26:case 29:L.slice_del();break;case 2:case 7:case 12:case 18:case 22:case 27:L.slice_from("a");break;case 3:case 8:case 13:case 19:case 23:case 28:L.slice_from("e")}}var d,g=[new n("cs",-1,-1),new n("dzs",-1,-1),new n("gy",-1,-1),new n("ly",-1,-1),new n("ny",-1,-1),new 
n("sz",-1,-1),new n("ty",-1,-1),new n("zs",-1,-1)],h=[new n("á",-1,1),new n("é",-1,2)],p=[new n("bb",-1,-1),new n("cc",-1,-1),new n("dd",-1,-1),new n("ff",-1,-1),new n("gg",-1,-1),new n("jj",-1,-1),new n("kk",-1,-1),new n("ll",-1,-1),new n("mm",-1,-1),new n("nn",-1,-1),new n("pp",-1,-1),new n("rr",-1,-1),new n("ccs",-1,-1),new n("ss",-1,-1),new n("zzs",-1,-1),new n("tt",-1,-1),new n("vv",-1,-1),new n("ggy",-1,-1),new n("lly",-1,-1),new n("nny",-1,-1),new n("tty",-1,-1),new n("ssz",-1,-1),new n("zz",-1,-1)],_=[new n("al",-1,1),new n("el",-1,2)],v=[new n("ba",-1,-1),new n("ra",-1,-1),new n("be",-1,-1),new n("re",-1,-1),new n("ig",-1,-1),new n("nak",-1,-1),new n("nek",-1,-1),new n("val",-1,-1),new n("vel",-1,-1),new n("ul",-1,-1),new n("nál",-1,-1),new n("nél",-1,-1),new n("ból",-1,-1),new n("ról",-1,-1),new n("tól",-1,-1),new n("bõl",-1,-1),new n("rõl",-1,-1),new n("tõl",-1,-1),new n("ül",-1,-1),new n("n",-1,-1),new n("an",19,-1),new n("ban",20,-1),new n("en",19,-1),new n("ben",22,-1),new n("képpen",22,-1),new n("on",19,-1),new n("ön",19,-1),new n("képp",-1,-1),new n("kor",-1,-1),new n("t",-1,-1),new n("at",29,-1),new n("et",29,-1),new n("ként",29,-1),new n("anként",32,-1),new n("enként",32,-1),new n("onként",32,-1),new n("ot",29,-1),new n("ért",29,-1),new n("öt",29,-1),new n("hez",-1,-1),new n("hoz",-1,-1),new n("höz",-1,-1),new n("vá",-1,-1),new n("vé",-1,-1)],z=[new n("án",-1,2),new n("én",-1,1),new n("ánként",-1,3)],y=[new n("stul",-1,2),new n("astul",0,1),new n("ástul",0,3),new n("stül",-1,2),new n("estül",3,1),new n("éstül",3,4)],j=[new n("á",-1,1),new n("é",-1,2)],C=[new n("k",-1,7),new n("ak",0,4),new n("ek",0,6),new n("ok",0,5),new n("ák",0,1),new n("ék",0,2),new n("ök",0,3)],P=[new n("éi",-1,7),new n("áéi",0,6),new n("ééi",0,5),new n("é",-1,9),new n("ké",3,4),new n("aké",4,1),new n("eké",4,1),new n("oké",4,1),new n("áké",4,3),new n("éké",4,2),new n("öké",4,1),new n("éé",3,8)],F=[new n("a",-1,18),new n("ja",0,17),new n("d",-1,16),new n("ad",2,13),new 
n("ed",2,13),new n("od",2,13),new n("ád",2,14),new n("éd",2,15),new n("öd",2,13),new n("e",-1,18),new n("je",9,17),new n("nk",-1,4),new n("unk",11,1),new n("ánk",11,2),new n("énk",11,3),new n("ünk",11,1),new n("uk",-1,8),new n("juk",16,7),new n("ájuk",17,5),new n("ük",-1,8),new n("jük",19,7),new n("éjük",20,6),new n("m",-1,12),new n("am",22,9),new n("em",22,9),new n("om",22,9),new n("ám",22,10),new n("ém",22,11),new n("o",-1,18),new n("á",-1,19),new n("é",-1,20)],S=[new n("id",-1,10),new n("aid",0,9),new n("jaid",1,6),new n("eid",0,9),new n("jeid",3,6),new n("áid",0,7),new n("éid",0,8),new n("i",-1,15),new n("ai",7,14),new n("jai",8,11),new n("ei",7,14),new n("jei",10,11),new n("ái",7,12),new n("éi",7,13),new n("itek",-1,24),new n("eitek",14,21),new n("jeitek",15,20),new n("éitek",14,23),new n("ik",-1,29),new n("aik",18,26),new n("jaik",19,25),new n("eik",18,26),new n("jeik",21,25),new n("áik",18,27),new n("éik",18,28),new n("ink",-1,20),new n("aink",25,17),new n("jaink",26,16),new n("eink",25,17),new n("jeink",28,16),new n("áink",25,18),new n("éink",25,19),new n("aitok",-1,21),new n("jaitok",32,20),new n("áitok",-1,22),new n("im",-1,5),new n("aim",35,4),new n("jaim",36,1),new n("eim",35,4),new n("jeim",38,1),new n("áim",35,2),new n("éim",35,3)],W=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,52,14],L=new r;this.setCurrent=function(e){L.setCurrent(e)},this.getCurrent=function(){return L.getCurrent()},this.stem=function(){var n=L.cursor;return e(),L.limit_backward=n,L.cursor=L.limit,c(),L.cursor=L.limit,o(),L.cursor=L.limit,w(),L.cursor=L.limit,l(),L.cursor=L.limit,u(),L.cursor=L.limit,k(),L.cursor=L.limit,f(),L.cursor=L.limit,b(),L.cursor=L.limit,m(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.hu.stemmer,"stemmer-hu"),e.hu.stopWordFilter=e.generateStopWordFilter("a abban ahhoz ahogy ahol aki akik akkor alatt 
amely amelyek amelyekben amelyeket amelyet amelynek ami amikor amit amolyan amíg annak arra arról az azok azon azonban azt aztán azután azzal azért be belül benne bár cikk cikkek cikkeket csak de e ebben eddig egy egyes egyetlen egyik egyre egyéb egész ehhez ekkor el ellen elsõ elég elõ elõször elõtt emilyen ennek erre ez ezek ezen ezt ezzel ezért fel felé hanem hiszen hogy hogyan igen ill ill. illetve ilyen ilyenkor ismét ison itt jobban jó jól kell kellett keressünk keresztül ki kívül között közül legalább legyen lehet lehetett lenne lenni lesz lett maga magát majd majd meg mellett mely melyek mert mi mikor milyen minden mindenki mindent mindig mint mintha mit mivel miért most már más másik még míg nagy nagyobb nagyon ne nekem neki nem nincs néha néhány nélkül olyan ott pedig persze rá s saját sem semmi sok sokat sokkal szemben szerint szinte számára talán tehát teljes tovább továbbá több ugyanis utolsó után utána vagy vagyis vagyok valaki valami valamint való van vannak vele vissza viszont volna volt voltak voltam voltunk által általában át én éppen és így õ õk õket össze úgy új újabb újra".split(" ")),e.Pipeline.registerFunction(e.hu.stopWordFilter,"stopWordFilter-hu")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.it.min.js b/1.3/assets/javascripts/lunr/min/lunr.it.min.js new file mode 100644 index 00000000..344b6a3c --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.it.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Italian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! 
+ * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.it=function(){this.pipeline.reset(),this.pipeline.add(e.it.trimmer,e.it.stopWordFilter,e.it.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.it.stemmer))},e.it.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.it.trimmer=e.trimmerSupport.generateTrimmer(e.it.wordCharacters),e.Pipeline.registerFunction(e.it.trimmer,"trimmer-it"),e.it.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,n){return!(!x.eq_s(1,e)||(x.ket=x.cursor,!x.in_grouping(L,97,249)))&&(x.slice_from(r),x.cursor=n,!0)}function i(){for(var r,n,i,o,t=x.cursor;;){if(x.bra=x.cursor,r=x.find_among(h,7))switch(x.ket=x.cursor,r){case 1:x.slice_from("à");continue;case 2:x.slice_from("è");continue;case 3:x.slice_from("ì");continue;case 4:x.slice_from("ò");continue;case 5:x.slice_from("ù");continue;case 6:x.slice_from("qU");continue;case 7:if(x.cursor>=x.limit)break;x.cursor++;continue}break}for(x.cursor=t;;)for(n=x.cursor;;){if(i=x.cursor,x.in_grouping(L,97,249)){if(x.bra=x.cursor,o=x.cursor,e("u","U",i))break;if(x.cursor=o,e("i","I",i))break}if(x.cursor=i,x.cursor>=x.limit)return void(x.cursor=n);x.cursor++}}function o(e){if(x.cursor=e,!x.in_grouping(L,97,249))return!1;for(;!x.out_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}return!0}function 
t(){if(x.in_grouping(L,97,249)){var e=x.cursor;if(x.out_grouping(L,97,249)){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return o(e);x.cursor++}return!0}return o(e)}return!1}function s(){var e,r=x.cursor;if(!t()){if(x.cursor=r,!x.out_grouping(L,97,249))return;if(e=x.cursor,x.out_grouping(L,97,249)){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return x.cursor=e,void(x.in_grouping(L,97,249)&&x.cursor=x.limit)return;x.cursor++}k=x.cursor}function a(){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}for(;!x.out_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}return!0}function u(){var e=x.cursor;k=x.limit,p=k,g=k,s(),x.cursor=e,a()&&(p=x.cursor,a()&&(g=x.cursor))}function c(){for(var e;;){if(x.bra=x.cursor,!(e=x.find_among(q,3)))break;switch(x.ket=x.cursor,e){case 1:x.slice_from("i");break;case 2:x.slice_from("u");break;case 3:if(x.cursor>=x.limit)return;x.cursor++}}}function w(){return k<=x.cursor}function l(){return p<=x.cursor}function m(){return g<=x.cursor}function f(){var e;if(x.ket=x.cursor,x.find_among_b(C,37)&&(x.bra=x.cursor,(e=x.find_among_b(z,5))&&w()))switch(e){case 1:x.slice_del();break;case 2:x.slice_from("e")}}function v(){var e;if(x.ket=x.cursor,!(e=x.find_among_b(S,51)))return!1;switch(x.bra=x.cursor,e){case 1:if(!m())return!1;x.slice_del();break;case 2:if(!m())return!1;x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"ic")&&(x.bra=x.cursor,m()&&x.slice_del());break;case 3:if(!m())return!1;x.slice_from("log");break;case 4:if(!m())return!1;x.slice_from("u");break;case 5:if(!m())return!1;x.slice_from("ente");break;case 6:if(!w())return!1;x.slice_del();break;case 7:if(!l())return!1;x.slice_del(),x.ket=x.cursor,e=x.find_among_b(P,4),e&&(x.bra=x.cursor,m()&&(x.slice_del(),1==e&&(x.ket=x.cursor,x.eq_s_b(2,"at")&&(x.bra=x.cursor,m()&&x.slice_del()))));break;case 8:if(!m())return!1;x.slice_del(),x.ket=x.cursor,e=x.find_among_b(F,3),e&&(x.bra=x.cursor,1==e&&m()&&x.slice_del());break;case 
9:if(!m())return!1;x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"at")&&(x.bra=x.cursor,m()&&(x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"ic")&&(x.bra=x.cursor,m()&&x.slice_del())))}return!0}function b(){var e,r;x.cursor>=k&&(r=x.limit_backward,x.limit_backward=k,x.ket=x.cursor,e=x.find_among_b(W,87),e&&(x.bra=x.cursor,1==e&&x.slice_del()),x.limit_backward=r)}function d(){var e=x.limit-x.cursor;if(x.ket=x.cursor,x.in_grouping_b(y,97,242)&&(x.bra=x.cursor,w()&&(x.slice_del(),x.ket=x.cursor,x.eq_s_b(1,"i")&&(x.bra=x.cursor,w()))))return void x.slice_del();x.cursor=x.limit-e}function _(){d(),x.ket=x.cursor,x.eq_s_b(1,"h")&&(x.bra=x.cursor,x.in_grouping_b(U,99,103)&&w()&&x.slice_del())}var g,p,k,h=[new r("",-1,7),new r("qu",0,6),new r("á",0,1),new r("é",0,2),new r("í",0,3),new r("ó",0,4),new r("ú",0,5)],q=[new r("",-1,3),new r("I",0,1),new r("U",0,2)],C=[new r("la",-1,-1),new r("cela",0,-1),new r("gliela",0,-1),new r("mela",0,-1),new r("tela",0,-1),new r("vela",0,-1),new r("le",-1,-1),new r("cele",6,-1),new r("gliele",6,-1),new r("mele",6,-1),new r("tele",6,-1),new r("vele",6,-1),new r("ne",-1,-1),new r("cene",12,-1),new r("gliene",12,-1),new r("mene",12,-1),new r("sene",12,-1),new r("tene",12,-1),new r("vene",12,-1),new r("ci",-1,-1),new r("li",-1,-1),new r("celi",20,-1),new r("glieli",20,-1),new r("meli",20,-1),new r("teli",20,-1),new r("veli",20,-1),new r("gli",20,-1),new r("mi",-1,-1),new r("si",-1,-1),new r("ti",-1,-1),new r("vi",-1,-1),new r("lo",-1,-1),new r("celo",31,-1),new r("glielo",31,-1),new r("melo",31,-1),new r("telo",31,-1),new r("velo",31,-1)],z=[new r("ando",-1,1),new r("endo",-1,1),new r("ar",-1,2),new r("er",-1,2),new r("ir",-1,2)],P=[new r("ic",-1,-1),new r("abil",-1,-1),new r("os",-1,-1),new r("iv",-1,1)],F=[new r("ic",-1,1),new r("abil",-1,1),new r("iv",-1,1)],S=[new r("ica",-1,1),new r("logia",-1,3),new r("osa",-1,1),new r("ista",-1,1),new r("iva",-1,9),new r("anza",-1,1),new r("enza",-1,5),new r("ice",-1,1),new r("atrice",7,1),new r("iche",-1,1),new 
r("logie",-1,3),new r("abile",-1,1),new r("ibile",-1,1),new r("usione",-1,4),new r("azione",-1,2),new r("uzione",-1,4),new r("atore",-1,2),new r("ose",-1,1),new r("ante",-1,1),new r("mente",-1,1),new r("amente",19,7),new r("iste",-1,1),new r("ive",-1,9),new r("anze",-1,1),new r("enze",-1,5),new r("ici",-1,1),new r("atrici",25,1),new r("ichi",-1,1),new r("abili",-1,1),new r("ibili",-1,1),new r("ismi",-1,1),new r("usioni",-1,4),new r("azioni",-1,2),new r("uzioni",-1,4),new r("atori",-1,2),new r("osi",-1,1),new r("anti",-1,1),new r("amenti",-1,6),new r("imenti",-1,6),new r("isti",-1,1),new r("ivi",-1,9),new r("ico",-1,1),new r("ismo",-1,1),new r("oso",-1,1),new r("amento",-1,6),new r("imento",-1,6),new r("ivo",-1,9),new r("ità",-1,8),new r("istà",-1,1),new r("istè",-1,1),new r("istì",-1,1)],W=[new r("isca",-1,1),new r("enda",-1,1),new r("ata",-1,1),new r("ita",-1,1),new r("uta",-1,1),new r("ava",-1,1),new r("eva",-1,1),new r("iva",-1,1),new r("erebbe",-1,1),new r("irebbe",-1,1),new r("isce",-1,1),new r("ende",-1,1),new r("are",-1,1),new r("ere",-1,1),new r("ire",-1,1),new r("asse",-1,1),new r("ate",-1,1),new r("avate",16,1),new r("evate",16,1),new r("ivate",16,1),new r("ete",-1,1),new r("erete",20,1),new r("irete",20,1),new r("ite",-1,1),new r("ereste",-1,1),new r("ireste",-1,1),new r("ute",-1,1),new r("erai",-1,1),new r("irai",-1,1),new r("isci",-1,1),new r("endi",-1,1),new r("erei",-1,1),new r("irei",-1,1),new r("assi",-1,1),new r("ati",-1,1),new r("iti",-1,1),new r("eresti",-1,1),new r("iresti",-1,1),new r("uti",-1,1),new r("avi",-1,1),new r("evi",-1,1),new r("ivi",-1,1),new r("isco",-1,1),new r("ando",-1,1),new r("endo",-1,1),new r("Yamo",-1,1),new r("iamo",-1,1),new r("avamo",-1,1),new r("evamo",-1,1),new r("ivamo",-1,1),new r("eremo",-1,1),new r("iremo",-1,1),new r("assimo",-1,1),new r("ammo",-1,1),new r("emmo",-1,1),new r("eremmo",54,1),new r("iremmo",54,1),new r("immo",-1,1),new r("ano",-1,1),new r("iscano",58,1),new r("avano",58,1),new r("evano",58,1),new 
r("ivano",58,1),new r("eranno",-1,1),new r("iranno",-1,1),new r("ono",-1,1),new r("iscono",65,1),new r("arono",65,1),new r("erono",65,1),new r("irono",65,1),new r("erebbero",-1,1),new r("irebbero",-1,1),new r("assero",-1,1),new r("essero",-1,1),new r("issero",-1,1),new r("ato",-1,1),new r("ito",-1,1),new r("uto",-1,1),new r("avo",-1,1),new r("evo",-1,1),new r("ivo",-1,1),new r("ar",-1,1),new r("ir",-1,1),new r("erà",-1,1),new r("irà",-1,1),new r("erò",-1,1),new r("irò",-1,1)],L=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2,1],y=[17,65,0,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2],U=[17],x=new n;this.setCurrent=function(e){x.setCurrent(e)},this.getCurrent=function(){return x.getCurrent()},this.stem=function(){var e=x.cursor;return i(),x.cursor=e,u(),x.limit_backward=e,x.cursor=x.limit,f(),x.cursor=x.limit,v()||(x.cursor=x.limit,b()),x.cursor=x.limit,_(),x.cursor=x.limit_backward,c(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.it.stemmer,"stemmer-it"),e.it.stopWordFilter=e.generateStopWordFilter("a abbia abbiamo abbiano abbiate ad agl agli ai al all alla alle allo anche avemmo avendo avesse avessero avessi avessimo aveste avesti avete aveva avevamo avevano avevate avevi avevo avrai avranno avrebbe avrebbero avrei avremmo avremo avreste avresti avrete avrà avrò avuta avute avuti avuto c che chi ci coi col come con contro cui da dagl dagli dai dal dall dalla dalle dallo degl degli dei del dell della delle dello di dov dove e ebbe ebbero ebbi ed era erano eravamo eravate eri ero essendo faccia facciamo facciano facciate faccio facemmo facendo facesse facessero facessi facessimo faceste facesti faceva facevamo facevano facevate facevi facevo fai fanno farai faranno farebbe farebbero farei faremmo faremo fareste faresti farete farà farò fece fecero feci fosse fossero fossi fossimo foste fosti fu fui fummo furono gli ha 
hai hanno ho i il in io l la le lei li lo loro lui ma mi mia mie miei mio ne negl negli nei nel nell nella nelle nello noi non nostra nostre nostri nostro o per perché più quale quanta quante quanti quanto quella quelle quelli quello questa queste questi questo sarai saranno sarebbe sarebbero sarei saremmo saremo sareste saresti sarete sarà sarò se sei si sia siamo siano siate siete sono sta stai stando stanno starai staranno starebbe starebbero starei staremmo staremo stareste staresti starete starà starò stava stavamo stavano stavate stavi stavo stemmo stesse stessero stessi stessimo steste stesti stette stettero stetti stia stiamo stiano stiate sto su sua sue sugl sugli sui sul sull sulla sulle sullo suo suoi ti tra tu tua tue tuo tuoi tutti tutto un una uno vi voi vostra vostre vostri vostro è".split(" ")),e.Pipeline.registerFunction(e.it.stopWordFilter,"stopWordFilter-it")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.ja.min.js b/1.3/assets/javascripts/lunr/min/lunr.ja.min.js new file mode 100644 index 00000000..5f254ebe --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.ja.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.ja=function(){this.pipeline.reset(),this.pipeline.add(e.ja.trimmer,e.ja.stopWordFilter,e.ja.stemmer),r?this.tokenizer=e.ja.tokenizer:(e.tokenizer&&(e.tokenizer=e.ja.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.ja.tokenizer))};var t=new e.TinySegmenter;e.ja.tokenizer=function(i){var n,o,s,p,a,u,m,l,c,f;if(!arguments.length||null==i||void 0==i)return[];if(Array.isArray(i))return i.map(function(t){return r?new e.Token(t.toLowerCase()):t.toLowerCase()});for(o=i.toString().toLowerCase().replace(/^\s+/,""),n=o.length-1;n>=0;n--)if(/\S/.test(o.charAt(n))){o=o.substring(0,n+1);break}for(a=[],s=o.length,c=0,l=0;c<=s;c++)if(u=o.charAt(c),m=c-l,u.match(/\s/)||c==s){if(m>0)for(p=t.segment(o.slice(l,c)).filter(function(e){return!!e}),f=l,n=0;n=C.limit)break;C.cursor++;continue}break}for(C.cursor=o,C.bra=o,C.eq_s(1,"y")?(C.ket=C.cursor,C.slice_from("Y")):C.cursor=o;;)if(e=C.cursor,C.in_grouping(q,97,232)){if(i=C.cursor,C.bra=i,C.eq_s(1,"i"))C.ket=C.cursor,C.in_grouping(q,97,232)&&(C.slice_from("I"),C.cursor=e);else if(C.cursor=i,C.eq_s(1,"y"))C.ket=C.cursor,C.slice_from("Y"),C.cursor=e;else if(n(e))break}else if(n(e))break}function n(r){return C.cursor=r,r>=C.limit||(C.cursor++,!1)}function o(){_=C.limit,d=_,t()||(_=C.cursor,_<3&&(_=3),t()||(d=C.cursor))}function t(){for(;!C.in_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}for(;!C.out_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}return!1}function s(){for(var r;;)if(C.bra=C.cursor,r=C.find_among(p,3))switch(C.ket=C.cursor,r){case 1:C.slice_from("y");break;case 2:C.slice_from("i");break;case 3:if(C.cursor>=C.limit)return;C.cursor++}}function u(){return _<=C.cursor}function c(){return d<=C.cursor}function a(){var r=C.limit-C.cursor;C.find_among_b(g,3)&&(C.cursor=C.limit-r,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del()))}function l(){var 
r;w=!1,C.ket=C.cursor,C.eq_s_b(1,"e")&&(C.bra=C.cursor,u()&&(r=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-r,C.slice_del(),w=!0,a())))}function m(){var r;u()&&(r=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-r,C.eq_s_b(3,"gem")||(C.cursor=C.limit-r,C.slice_del(),a())))}function f(){var r,e,i,n,o,t,s=C.limit-C.cursor;if(C.ket=C.cursor,r=C.find_among_b(h,5))switch(C.bra=C.cursor,r){case 1:u()&&C.slice_from("heid");break;case 2:m();break;case 3:u()&&C.out_grouping_b(j,97,232)&&C.slice_del()}if(C.cursor=C.limit-s,l(),C.cursor=C.limit-s,C.ket=C.cursor,C.eq_s_b(4,"heid")&&(C.bra=C.cursor,c()&&(e=C.limit-C.cursor,C.eq_s_b(1,"c")||(C.cursor=C.limit-e,C.slice_del(),C.ket=C.cursor,C.eq_s_b(2,"en")&&(C.bra=C.cursor,m())))),C.cursor=C.limit-s,C.ket=C.cursor,r=C.find_among_b(k,6))switch(C.bra=C.cursor,r){case 1:if(c()){if(C.slice_del(),i=C.limit-C.cursor,C.ket=C.cursor,C.eq_s_b(2,"ig")&&(C.bra=C.cursor,c()&&(n=C.limit-C.cursor,!C.eq_s_b(1,"e")))){C.cursor=C.limit-n,C.slice_del();break}C.cursor=C.limit-i,a()}break;case 2:c()&&(o=C.limit-C.cursor,C.eq_s_b(1,"e")||(C.cursor=C.limit-o,C.slice_del()));break;case 3:c()&&(C.slice_del(),l());break;case 4:c()&&C.slice_del();break;case 5:c()&&w&&C.slice_del()}C.cursor=C.limit-s,C.out_grouping_b(z,73,232)&&(t=C.limit-C.cursor,C.find_among_b(v,4)&&C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-t,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del())))}var d,_,w,b=[new e("",-1,6),new e("á",0,1),new e("ä",0,1),new e("é",0,2),new e("ë",0,2),new e("í",0,3),new e("ï",0,3),new e("ó",0,4),new e("ö",0,4),new e("ú",0,5),new e("ü",0,5)],p=[new e("",-1,3),new e("I",0,2),new e("Y",0,1)],g=[new e("dd",-1,-1),new e("kk",-1,-1),new e("tt",-1,-1)],h=[new e("ene",-1,2),new e("se",-1,3),new e("en",-1,2),new e("heden",2,1),new e("s",-1,3)],k=[new e("end",-1,1),new e("ig",-1,2),new e("ing",-1,1),new e("lijk",-1,3),new e("baar",-1,4),new e("bar",-1,5)],v=[new e("aa",-1,-1),new e("ee",-1,-1),new 
e("oo",-1,-1),new e("uu",-1,-1)],q=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],z=[1,0,0,17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],j=[17,67,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],C=new i;this.setCurrent=function(r){C.setCurrent(r)},this.getCurrent=function(){return C.getCurrent()},this.stem=function(){var e=C.cursor;return r(),C.cursor=e,o(),C.limit_backward=e,C.cursor=C.limit,f(),C.cursor=C.limit_backward,s(),!0}};return function(r){return"function"==typeof r.update?r.update(function(r){return n.setCurrent(r),n.stem(),n.getCurrent()}):(n.setCurrent(r),n.stem(),n.getCurrent())}}(),r.Pipeline.registerFunction(r.nl.stemmer,"stemmer-nl"),r.nl.stopWordFilter=r.generateStopWordFilter(" aan al alles als altijd andere ben bij daar dan dat de der deze die dit doch doen door dus een eens en er ge geen geweest haar had heb hebben heeft hem het hier hij hoe hun iemand iets ik in is ja je kan kon kunnen maar me meer men met mij mijn moet na naar niet niets nog nu of om omdat onder ons ook op over reeds te tegen toch toen tot u uit uw van veel voor want waren was wat werd wezen wie wil worden wordt zal ze zelf zich zij zijn zo zonder zou".split(" ")),r.Pipeline.registerFunction(r.nl.stopWordFilter,"stopWordFilter-nl")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.no.min.js b/1.3/assets/javascripts/lunr/min/lunr.no.min.js new file mode 100644 index 00000000..92bc7e4e --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.no.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Norwegian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! 
+ * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.no=function(){this.pipeline.reset(),this.pipeline.add(e.no.trimmer,e.no.stopWordFilter,e.no.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.no.stemmer))},e.no.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.no.trimmer=e.trimmerSupport.generateTrimmer(e.no.wordCharacters),e.Pipeline.registerFunction(e.no.trimmer,"trimmer-no"),e.no.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(){var e,r=w.cursor+3;if(a=w.limit,0<=r||r<=w.limit){for(s=r;;){if(e=w.cursor,w.in_grouping(d,97,248)){w.cursor=e;break}if(e>=w.limit)return;w.cursor=e+1}for(;!w.out_grouping(d,97,248);){if(w.cursor>=w.limit)return;w.cursor++}a=w.cursor,a=a&&(r=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,e=w.find_among_b(m,29),w.limit_backward=r,e))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:n=w.limit-w.cursor,w.in_grouping_b(c,98,122)?w.slice_del():(w.cursor=w.limit-n,w.eq_s_b(1,"k")&&w.out_grouping_b(d,97,248)&&w.slice_del());break;case 3:w.slice_from("er")}}function t(){var e,r=w.limit-w.cursor;w.cursor>=a&&(e=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,w.find_among_b(u,2)?(w.bra=w.cursor,w.limit_backward=e,w.cursor=w.limit-r,w.cursor>w.limit_backward&&(w.cursor--,w.bra=w.cursor,w.slice_del())):w.limit_backward=e)}function o(){var 
e,r;w.cursor>=a&&(r=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,e=w.find_among_b(l,11),e?(w.bra=w.cursor,w.limit_backward=r,1==e&&w.slice_del()):w.limit_backward=r)}var s,a,m=[new r("a",-1,1),new r("e",-1,1),new r("ede",1,1),new r("ande",1,1),new r("ende",1,1),new r("ane",1,1),new r("ene",1,1),new r("hetene",6,1),new r("erte",1,3),new r("en",-1,1),new r("heten",9,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",12,1),new r("s",-1,2),new r("as",14,1),new r("es",14,1),new r("edes",16,1),new r("endes",16,1),new r("enes",16,1),new r("hetenes",19,1),new r("ens",14,1),new r("hetens",21,1),new r("ers",14,1),new r("ets",14,1),new r("et",-1,1),new r("het",25,1),new r("ert",-1,3),new r("ast",-1,1)],u=[new r("dt",-1,-1),new r("vt",-1,-1)],l=[new r("leg",-1,1),new r("eleg",0,1),new r("ig",-1,1),new r("eig",2,1),new r("lig",2,1),new r("elig",4,1),new r("els",-1,1),new r("lov",-1,1),new r("elov",7,1),new r("slov",7,1),new r("hetslov",9,1)],d=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],c=[119,125,149,1],w=new n;this.setCurrent=function(e){w.setCurrent(e)},this.getCurrent=function(){return w.getCurrent()},this.stem=function(){var r=w.cursor;return e(),w.limit_backward=r,w.cursor=w.limit,i(),w.cursor=w.limit,t(),w.cursor=w.limit,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.no.stemmer,"stemmer-no"),e.no.stopWordFilter=e.generateStopWordFilter("alle at av bare begge ble blei bli blir blitt både båe da de deg dei deim deira deires dem den denne der dere deres det dette di din disse ditt du dykk dykkar då eg ein eit eitt eller elles en enn er et ett etter for fordi fra før ha hadde han hans har hennar henne hennes her hjå ho hoe honom hoss hossen hun hva hvem hver hvilke hvilken hvis hvor hvordan hvorfor i ikke ikkje ikkje ingen ingi inkje inn inni ja jeg kan kom korleis korso kun kunne kva kvar kvarhelst kven kvi 
kvifor man mange me med medan meg meget mellom men mi min mine mitt mot mykje ned no noe noen noka noko nokon nokor nokre nå når og også om opp oss over på samme seg selv si si sia sidan siden sin sine sitt sjøl skal skulle slik so som som somme somt så sånn til um upp ut uten var vart varte ved vere verte vi vil ville vore vors vort vår være være vært å".split(" ")),e.Pipeline.registerFunction(e.no.stopWordFilter,"stopWordFilter-no")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.pt.min.js b/1.3/assets/javascripts/lunr/min/lunr.pt.min.js new file mode 100644 index 00000000..6c16996d --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.pt.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Portuguese` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.pt=function(){this.pipeline.reset(),this.pipeline.add(e.pt.trimmer,e.pt.stopWordFilter,e.pt.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.pt.stemmer))},e.pt.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.pt.trimmer=e.trimmerSupport.generateTrimmer(e.pt.wordCharacters),e.Pipeline.registerFunction(e.pt.trimmer,"trimmer-pt"),e.pt.stemmer=function(){var r=e.stemmerSupport.Among,s=e.stemmerSupport.SnowballProgram,n=new function(){function e(){for(var e;;){if(z.bra=z.cursor,e=z.find_among(k,3))switch(z.ket=z.cursor,e){case 1:z.slice_from("a~");continue;case 2:z.slice_from("o~");continue;case 3:if(z.cursor>=z.limit)break;z.cursor++;continue}break}}function n(){if(z.out_grouping(y,97,250)){for(;!z.in_grouping(y,97,250);){if(z.cursor>=z.limit)return!0;z.cursor++}return!1}return!0}function i(){if(z.in_grouping(y,97,250))for(;!z.out_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}return g=z.cursor,!0}function o(){var e,r,s=z.cursor;if(z.in_grouping(y,97,250))if(e=z.cursor,n()){if(z.cursor=e,i())return}else g=z.cursor;if(z.cursor=s,z.out_grouping(y,97,250)){if(r=z.cursor,n()){if(z.cursor=r,!z.in_grouping(y,97,250)||z.cursor>=z.limit)return;z.cursor++}g=z.cursor}}function t(){for(;!z.in_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}for(;!z.out_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}return!0}function a(){var e=z.cursor;g=z.limit,b=g,h=g,o(),z.cursor=e,t()&&(b=z.cursor,t()&&(h=z.cursor))}function u(){for(var e;;){if(z.bra=z.cursor,e=z.find_among(q,3))switch(z.ket=z.cursor,e){case 1:z.slice_from("ã");continue;case 2:z.slice_from("õ");continue;case 3:if(z.cursor>=z.limit)break;z.cursor++;continue}break}}function w(){return g<=z.cursor}function m(){return b<=z.cursor}function c(){return h<=z.cursor}function l(){var 
e;if(z.ket=z.cursor,!(e=z.find_among_b(F,45)))return!1;switch(z.bra=z.cursor,e){case 1:if(!c())return!1;z.slice_del();break;case 2:if(!c())return!1;z.slice_from("log");break;case 3:if(!c())return!1;z.slice_from("u");break;case 4:if(!c())return!1;z.slice_from("ente");break;case 5:if(!m())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(j,4),e&&(z.bra=z.cursor,c()&&(z.slice_del(),1==e&&(z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,c()&&z.slice_del()))));break;case 6:if(!c())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(C,3),e&&(z.bra=z.cursor,1==e&&c()&&z.slice_del());break;case 7:if(!c())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(P,3),e&&(z.bra=z.cursor,1==e&&c()&&z.slice_del());break;case 8:if(!c())return!1;z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,c()&&z.slice_del());break;case 9:if(!w()||!z.eq_s_b(1,"e"))return!1;z.slice_from("ir")}return!0}function f(){var e,r;if(z.cursor>=g){if(r=z.limit_backward,z.limit_backward=g,z.ket=z.cursor,e=z.find_among_b(S,120))return z.bra=z.cursor,1==e&&z.slice_del(),z.limit_backward=r,!0;z.limit_backward=r}return!1}function d(){var e;z.ket=z.cursor,(e=z.find_among_b(W,7))&&(z.bra=z.cursor,1==e&&w()&&z.slice_del())}function v(e,r){if(z.eq_s_b(1,e)){z.bra=z.cursor;var s=z.limit-z.cursor;if(z.eq_s_b(1,r))return z.cursor=z.limit-s,w()&&z.slice_del(),!1}return!0}function p(){var e;if(z.ket=z.cursor,e=z.find_among_b(L,4))switch(z.bra=z.cursor,e){case 1:w()&&(z.slice_del(),z.ket=z.cursor,z.limit-z.cursor,v("u","g")&&v("i","c"));break;case 2:z.slice_from("c")}}function _(){if(!l()&&(z.cursor=z.limit,!f()))return z.cursor=z.limit,void d();z.cursor=z.limit,z.ket=z.cursor,z.eq_s_b(1,"i")&&(z.bra=z.cursor,z.eq_s_b(1,"c")&&(z.cursor=z.limit,w()&&z.slice_del()))}var h,b,g,k=[new r("",-1,3),new r("ã",0,1),new r("õ",0,2)],q=[new r("",-1,3),new r("a~",0,1),new r("o~",0,2)],j=[new r("ic",-1,-1),new r("ad",-1,-1),new r("os",-1,-1),new r("iv",-1,1)],C=[new r("ante",-1,1),new r("avel",-1,1),new 
r("ível",-1,1)],P=[new r("ic",-1,1),new r("abil",-1,1),new r("iv",-1,1)],F=[new r("ica",-1,1),new r("ância",-1,1),new r("ência",-1,4),new r("ira",-1,9),new r("adora",-1,1),new r("osa",-1,1),new r("ista",-1,1),new r("iva",-1,8),new r("eza",-1,1),new r("logía",-1,2),new r("idade",-1,7),new r("ante",-1,1),new r("mente",-1,6),new r("amente",12,5),new r("ável",-1,1),new r("ível",-1,1),new r("ución",-1,3),new r("ico",-1,1),new r("ismo",-1,1),new r("oso",-1,1),new r("amento",-1,1),new r("imento",-1,1),new r("ivo",-1,8),new r("aça~o",-1,1),new r("ador",-1,1),new r("icas",-1,1),new r("ências",-1,4),new r("iras",-1,9),new r("adoras",-1,1),new r("osas",-1,1),new r("istas",-1,1),new r("ivas",-1,8),new r("ezas",-1,1),new r("logías",-1,2),new r("idades",-1,7),new r("uciones",-1,3),new r("adores",-1,1),new r("antes",-1,1),new r("aço~es",-1,1),new r("icos",-1,1),new r("ismos",-1,1),new r("osos",-1,1),new r("amentos",-1,1),new r("imentos",-1,1),new r("ivos",-1,8)],S=[new r("ada",-1,1),new r("ida",-1,1),new r("ia",-1,1),new r("aria",2,1),new r("eria",2,1),new r("iria",2,1),new r("ara",-1,1),new r("era",-1,1),new r("ira",-1,1),new r("ava",-1,1),new r("asse",-1,1),new r("esse",-1,1),new r("isse",-1,1),new r("aste",-1,1),new r("este",-1,1),new r("iste",-1,1),new r("ei",-1,1),new r("arei",16,1),new r("erei",16,1),new r("irei",16,1),new r("am",-1,1),new r("iam",20,1),new r("ariam",21,1),new r("eriam",21,1),new r("iriam",21,1),new r("aram",20,1),new r("eram",20,1),new r("iram",20,1),new r("avam",20,1),new r("em",-1,1),new r("arem",29,1),new r("erem",29,1),new r("irem",29,1),new r("assem",29,1),new r("essem",29,1),new r("issem",29,1),new r("ado",-1,1),new r("ido",-1,1),new r("ando",-1,1),new r("endo",-1,1),new r("indo",-1,1),new r("ara~o",-1,1),new r("era~o",-1,1),new r("ira~o",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("ir",-1,1),new r("as",-1,1),new r("adas",47,1),new r("idas",47,1),new r("ias",47,1),new r("arias",50,1),new r("erias",50,1),new r("irias",50,1),new r("aras",47,1),new 
r("eras",47,1),new r("iras",47,1),new r("avas",47,1),new r("es",-1,1),new r("ardes",58,1),new r("erdes",58,1),new r("irdes",58,1),new r("ares",58,1),new r("eres",58,1),new r("ires",58,1),new r("asses",58,1),new r("esses",58,1),new r("isses",58,1),new r("astes",58,1),new r("estes",58,1),new r("istes",58,1),new r("is",-1,1),new r("ais",71,1),new r("eis",71,1),new r("areis",73,1),new r("ereis",73,1),new r("ireis",73,1),new r("áreis",73,1),new r("éreis",73,1),new r("íreis",73,1),new r("ásseis",73,1),new r("ésseis",73,1),new r("ísseis",73,1),new r("áveis",73,1),new r("íeis",73,1),new r("aríeis",84,1),new r("eríeis",84,1),new r("iríeis",84,1),new r("ados",-1,1),new r("idos",-1,1),new r("amos",-1,1),new r("áramos",90,1),new r("éramos",90,1),new r("íramos",90,1),new r("ávamos",90,1),new r("íamos",90,1),new r("aríamos",95,1),new r("eríamos",95,1),new r("iríamos",95,1),new r("emos",-1,1),new r("aremos",99,1),new r("eremos",99,1),new r("iremos",99,1),new r("ássemos",99,1),new r("êssemos",99,1),new r("íssemos",99,1),new r("imos",-1,1),new r("armos",-1,1),new r("ermos",-1,1),new r("irmos",-1,1),new r("ámos",-1,1),new r("arás",-1,1),new r("erás",-1,1),new r("irás",-1,1),new r("eu",-1,1),new r("iu",-1,1),new r("ou",-1,1),new r("ará",-1,1),new r("erá",-1,1),new r("irá",-1,1)],W=[new r("a",-1,1),new r("i",-1,1),new r("o",-1,1),new r("os",-1,1),new r("á",-1,1),new r("í",-1,1),new r("ó",-1,1)],L=[new r("e",-1,1),new r("ç",-1,2),new r("é",-1,1),new r("ê",-1,1)],y=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,3,19,12,2],z=new s;this.setCurrent=function(e){z.setCurrent(e)},this.getCurrent=function(){return z.getCurrent()},this.stem=function(){var r=z.cursor;return e(),z.cursor=r,a(),z.limit_backward=r,z.cursor=z.limit,_(),z.cursor=z.limit,p(),z.cursor=z.limit_backward,u(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.pt.stemmer,"stemmer-pt"),e.pt.stopWordFilter=e.generateStopWordFilter("a ao aos aquela aquelas aquele aqueles aquilo as até com como da das de dela delas dele deles depois do dos e ela elas ele eles em entre era eram essa essas esse esses esta estamos estas estava estavam este esteja estejam estejamos estes esteve estive estivemos estiver estivera estiveram estiverem estivermos estivesse estivessem estivéramos estivéssemos estou está estávamos estão eu foi fomos for fora foram forem formos fosse fossem fui fôramos fôssemos haja hajam hajamos havemos hei houve houvemos houver houvera houveram houverei houverem houveremos houveria houveriam houvermos houverá houverão houveríamos houvesse houvessem houvéramos houvéssemos há hão isso isto já lhe lhes mais mas me mesmo meu meus minha minhas muito na nas nem no nos nossa nossas nosso nossos num numa não nós o os ou para pela pelas pelo pelos por qual quando que quem se seja sejam sejamos sem serei seremos seria seriam será serão seríamos seu seus somos sou sua suas são só também te tem temos tenha tenham tenhamos tenho terei teremos teria teriam terá terão teríamos teu teus teve tinha tinham tive tivemos tiver tivera tiveram tiverem tivermos tivesse tivessem tivéramos tivéssemos tu tua tuas tém tínhamos um uma você vocês vos à às éramos".split(" ")),e.Pipeline.registerFunction(e.pt.stopWordFilter,"stopWordFilter-pt")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.ro.min.js b/1.3/assets/javascripts/lunr/min/lunr.ro.min.js new file mode 100644 index 00000000..72771401 --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.ro.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Romanian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! 
+ * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.ro=function(){this.pipeline.reset(),this.pipeline.add(e.ro.trimmer,e.ro.stopWordFilter,e.ro.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ro.stemmer))},e.ro.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.ro.trimmer=e.trimmerSupport.generateTrimmer(e.ro.wordCharacters),e.Pipeline.registerFunction(e.ro.trimmer,"trimmer-ro"),e.ro.stemmer=function(){var i=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,n=new function(){function e(e,i){L.eq_s(1,e)&&(L.ket=L.cursor,L.in_grouping(W,97,259)&&L.slice_from(i))}function n(){for(var i,r;;){if(i=L.cursor,L.in_grouping(W,97,259)&&(r=L.cursor,L.bra=r,e("u","U"),L.cursor=r,e("i","I")),L.cursor=i,L.cursor>=L.limit)break;L.cursor++}}function t(){if(L.out_grouping(W,97,259)){for(;!L.in_grouping(W,97,259);){if(L.cursor>=L.limit)return!0;L.cursor++}return!1}return!0}function a(){if(L.in_grouping(W,97,259))for(;!L.out_grouping(W,97,259);){if(L.cursor>=L.limit)return!0;L.cursor++}return!1}function o(){var e,i,r=L.cursor;if(L.in_grouping(W,97,259)){if(e=L.cursor,!t())return void(h=L.cursor);if(L.cursor=e,!a())return 
void(h=L.cursor)}L.cursor=r,L.out_grouping(W,97,259)&&(i=L.cursor,t()&&(L.cursor=i,L.in_grouping(W,97,259)&&L.cursor=L.limit)return!1;L.cursor++}for(;!L.out_grouping(W,97,259);){if(L.cursor>=L.limit)return!1;L.cursor++}return!0}function c(){var e=L.cursor;h=L.limit,k=h,g=h,o(),L.cursor=e,u()&&(k=L.cursor,u()&&(g=L.cursor))}function s(){for(var e;;){if(L.bra=L.cursor,e=L.find_among(z,3))switch(L.ket=L.cursor,e){case 1:L.slice_from("i");continue;case 2:L.slice_from("u");continue;case 3:if(L.cursor>=L.limit)break;L.cursor++;continue}break}}function w(){return h<=L.cursor}function m(){return k<=L.cursor}function l(){return g<=L.cursor}function f(){var e,i;if(L.ket=L.cursor,(e=L.find_among_b(C,16))&&(L.bra=L.cursor,m()))switch(e){case 1:L.slice_del();break;case 2:L.slice_from("a");break;case 3:L.slice_from("e");break;case 4:L.slice_from("i");break;case 5:i=L.limit-L.cursor,L.eq_s_b(2,"ab")||(L.cursor=L.limit-i,L.slice_from("i"));break;case 6:L.slice_from("at");break;case 7:L.slice_from("aţi")}}function p(){var e,i=L.limit-L.cursor;if(L.ket=L.cursor,(e=L.find_among_b(P,46))&&(L.bra=L.cursor,m())){switch(e){case 1:L.slice_from("abil");break;case 2:L.slice_from("ibil");break;case 3:L.slice_from("iv");break;case 4:L.slice_from("ic");break;case 5:L.slice_from("at");break;case 6:L.slice_from("it")}return _=!0,L.cursor=L.limit-i,!0}return!1}function d(){var e,i;for(_=!1;;)if(i=L.limit-L.cursor,!p()){L.cursor=L.limit-i;break}if(L.ket=L.cursor,(e=L.find_among_b(F,62))&&(L.bra=L.cursor,l())){switch(e){case 1:L.slice_del();break;case 2:L.eq_s_b(1,"ţ")&&(L.bra=L.cursor,L.slice_from("t"));break;case 3:L.slice_from("ist")}_=!0}}function b(){var e,i,r;if(L.cursor>=h){if(i=L.limit_backward,L.limit_backward=h,L.ket=L.cursor,e=L.find_among_b(q,94))switch(L.bra=L.cursor,e){case 1:if(r=L.limit-L.cursor,!L.out_grouping_b(W,97,259)&&(L.cursor=L.limit-r,!L.eq_s_b(1,"u")))break;case 2:L.slice_del()}L.limit_backward=i}}function v(){var 
e;L.ket=L.cursor,(e=L.find_among_b(S,5))&&(L.bra=L.cursor,w()&&1==e&&L.slice_del())}var _,g,k,h,z=[new i("",-1,3),new i("I",0,1),new i("U",0,2)],C=[new i("ea",-1,3),new i("aţia",-1,7),new i("aua",-1,2),new i("iua",-1,4),new i("aţie",-1,7),new i("ele",-1,3),new i("ile",-1,5),new i("iile",6,4),new i("iei",-1,4),new i("atei",-1,6),new i("ii",-1,4),new i("ului",-1,1),new i("ul",-1,1),new i("elor",-1,3),new i("ilor",-1,4),new i("iilor",14,4)],P=[new i("icala",-1,4),new i("iciva",-1,4),new i("ativa",-1,5),new i("itiva",-1,6),new i("icale",-1,4),new i("aţiune",-1,5),new i("iţiune",-1,6),new i("atoare",-1,5),new i("itoare",-1,6),new i("ătoare",-1,5),new i("icitate",-1,4),new i("abilitate",-1,1),new i("ibilitate",-1,2),new i("ivitate",-1,3),new i("icive",-1,4),new i("ative",-1,5),new i("itive",-1,6),new i("icali",-1,4),new i("atori",-1,5),new i("icatori",18,4),new i("itori",-1,6),new i("ători",-1,5),new i("icitati",-1,4),new i("abilitati",-1,1),new i("ivitati",-1,3),new i("icivi",-1,4),new i("ativi",-1,5),new i("itivi",-1,6),new i("icităi",-1,4),new i("abilităi",-1,1),new i("ivităi",-1,3),new i("icităţi",-1,4),new i("abilităţi",-1,1),new i("ivităţi",-1,3),new i("ical",-1,4),new i("ator",-1,5),new i("icator",35,4),new i("itor",-1,6),new i("ător",-1,5),new i("iciv",-1,4),new i("ativ",-1,5),new i("itiv",-1,6),new i("icală",-1,4),new i("icivă",-1,4),new i("ativă",-1,5),new i("itivă",-1,6)],F=[new i("ica",-1,1),new i("abila",-1,1),new i("ibila",-1,1),new i("oasa",-1,1),new i("ata",-1,1),new i("ita",-1,1),new i("anta",-1,1),new i("ista",-1,3),new i("uta",-1,1),new i("iva",-1,1),new i("ic",-1,1),new i("ice",-1,1),new i("abile",-1,1),new i("ibile",-1,1),new i("isme",-1,3),new i("iune",-1,2),new i("oase",-1,1),new i("ate",-1,1),new i("itate",17,1),new i("ite",-1,1),new i("ante",-1,1),new i("iste",-1,3),new i("ute",-1,1),new i("ive",-1,1),new i("ici",-1,1),new i("abili",-1,1),new i("ibili",-1,1),new i("iuni",-1,2),new i("atori",-1,1),new i("osi",-1,1),new i("ati",-1,1),new 
i("itati",30,1),new i("iti",-1,1),new i("anti",-1,1),new i("isti",-1,3),new i("uti",-1,1),new i("işti",-1,3),new i("ivi",-1,1),new i("ităi",-1,1),new i("oşi",-1,1),new i("ităţi",-1,1),new i("abil",-1,1),new i("ibil",-1,1),new i("ism",-1,3),new i("ator",-1,1),new i("os",-1,1),new i("at",-1,1),new i("it",-1,1),new i("ant",-1,1),new i("ist",-1,3),new i("ut",-1,1),new i("iv",-1,1),new i("ică",-1,1),new i("abilă",-1,1),new i("ibilă",-1,1),new i("oasă",-1,1),new i("ată",-1,1),new i("ită",-1,1),new i("antă",-1,1),new i("istă",-1,3),new i("ută",-1,1),new i("ivă",-1,1)],q=[new i("ea",-1,1),new i("ia",-1,1),new i("esc",-1,1),new i("ăsc",-1,1),new i("ind",-1,1),new i("ând",-1,1),new i("are",-1,1),new i("ere",-1,1),new i("ire",-1,1),new i("âre",-1,1),new i("se",-1,2),new i("ase",10,1),new i("sese",10,2),new i("ise",10,1),new i("use",10,1),new i("âse",10,1),new i("eşte",-1,1),new i("ăşte",-1,1),new i("eze",-1,1),new i("ai",-1,1),new i("eai",19,1),new i("iai",19,1),new i("sei",-1,2),new i("eşti",-1,1),new i("ăşti",-1,1),new i("ui",-1,1),new i("ezi",-1,1),new i("âi",-1,1),new i("aşi",-1,1),new i("seşi",-1,2),new i("aseşi",29,1),new i("seseşi",29,2),new i("iseşi",29,1),new i("useşi",29,1),new i("âseşi",29,1),new i("işi",-1,1),new i("uşi",-1,1),new i("âşi",-1,1),new i("aţi",-1,2),new i("eaţi",38,1),new i("iaţi",38,1),new i("eţi",-1,2),new i("iţi",-1,2),new i("âţi",-1,2),new i("arăţi",-1,1),new i("serăţi",-1,2),new i("aserăţi",45,1),new i("seserăţi",45,2),new i("iserăţi",45,1),new i("userăţi",45,1),new i("âserăţi",45,1),new i("irăţi",-1,1),new i("urăţi",-1,1),new i("ârăţi",-1,1),new i("am",-1,1),new i("eam",54,1),new i("iam",54,1),new i("em",-1,2),new i("asem",57,1),new i("sesem",57,2),new i("isem",57,1),new i("usem",57,1),new i("âsem",57,1),new i("im",-1,2),new i("âm",-1,2),new i("ăm",-1,2),new i("arăm",65,1),new i("serăm",65,2),new i("aserăm",67,1),new i("seserăm",67,2),new i("iserăm",67,1),new i("userăm",67,1),new i("âserăm",67,1),new i("irăm",65,1),new i("urăm",65,1),new 
i("ârăm",65,1),new i("au",-1,1),new i("eau",76,1),new i("iau",76,1),new i("indu",-1,1),new i("ându",-1,1),new i("ez",-1,1),new i("ească",-1,1),new i("ară",-1,1),new i("seră",-1,2),new i("aseră",84,1),new i("seseră",84,2),new i("iseră",84,1),new i("useră",84,1),new i("âseră",84,1),new i("iră",-1,1),new i("ură",-1,1),new i("âră",-1,1),new i("ează",-1,1)],S=[new i("a",-1,1),new i("e",-1,1),new i("ie",1,1),new i("i",-1,1),new i("ă",-1,1)],W=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,2,32,0,0,4],L=new r;this.setCurrent=function(e){L.setCurrent(e)},this.getCurrent=function(){return L.getCurrent()},this.stem=function(){var e=L.cursor;return n(),L.cursor=e,c(),L.limit_backward=e,L.cursor=L.limit,f(),L.cursor=L.limit,d(),L.cursor=L.limit,_||(L.cursor=L.limit,b(),L.cursor=L.limit),v(),L.cursor=L.limit_backward,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.ro.stemmer,"stemmer-ro"),e.ro.stopWordFilter=e.generateStopWordFilter("acea aceasta această aceea acei aceia acel acela acele acelea acest acesta aceste acestea aceşti aceştia acolo acord acum ai aia aibă aici al ale alea altceva altcineva am ar are asemenea asta astea astăzi asupra au avea avem aveţi azi aş aşadar aţi bine bucur bună ca care caut ce cel ceva chiar cinci cine cineva contra cu cum cumva curând curînd când cât câte câtva câţi cînd cît cîte cîtva cîţi că căci cărei căror cărui către da dacă dar datorită dată dau de deci deja deoarece departe deşi din dinaintea dintr- dintre doi doilea două drept după dă ea ei el ele eram este eu eşti face fata fi fie fiecare fii fim fiu fiţi frumos fără graţie halbă iar ieri la le li lor lui lângă lîngă mai mea mei mele mereu meu mi mie mine mult multă mulţi mulţumesc mâine mîine mă ne nevoie nici nicăieri nimeni nimeri nimic nişte noastre noastră noi noroc nostru nouă noştri nu opt ori oricare orice oricine oricum oricând 
oricât oricînd oricît oriunde patra patru patrulea pe pentru peste pic poate pot prea prima primul prin puţin puţina puţină până pînă rog sa sale sau se spate spre sub sunt suntem sunteţi sută sînt sîntem sînteţi să săi său ta tale te timp tine toate toată tot totuşi toţi trei treia treilea tu tăi tău un una unde undeva unei uneia unele uneori unii unor unora unu unui unuia unul vi voastre voastră voi vostru vouă voştri vreme vreo vreun vă zece zero zi zice îi îl îmi împotriva în înainte înaintea încotro încât încît între întrucât întrucît îţi ăla ălea ăsta ăstea ăştia şapte şase şi ştiu ţi ţie".split(" ")),e.Pipeline.registerFunction(e.ro.stopWordFilter,"stopWordFilter-ro")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.ru.min.js b/1.3/assets/javascripts/lunr/min/lunr.ru.min.js new file mode 100644 index 00000000..186cc485 --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.ru.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Russian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,n){"function"==typeof define&&define.amd?define(n):"object"==typeof exports?module.exports=n():n()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.ru=function(){this.pipeline.reset(),this.pipeline.add(e.ru.trimmer,e.ru.stopWordFilter,e.ru.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ru.stemmer))},e.ru.wordCharacters="Ѐ-҄҇-ԯᴫᵸⷠ-ⷿꙀ-ꚟ︮︯",e.ru.trimmer=e.trimmerSupport.generateTrimmer(e.ru.wordCharacters),e.Pipeline.registerFunction(e.ru.trimmer,"trimmer-ru"),e.ru.stemmer=function(){var n=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,t=new function(){function e(){for(;!W.in_grouping(S,1072,1103);){if(W.cursor>=W.limit)return!1;W.cursor++}return!0}function t(){for(;!W.out_grouping(S,1072,1103);){if(W.cursor>=W.limit)return!1;W.cursor++}return!0}function w(){b=W.limit,_=b,e()&&(b=W.cursor,t()&&e()&&t()&&(_=W.cursor))}function i(){return _<=W.cursor}function u(e,n){var r,t;if(W.ket=W.cursor,r=W.find_among_b(e,n)){switch(W.bra=W.cursor,r){case 1:if(t=W.limit-W.cursor,!W.eq_s_b(1,"а")&&(W.cursor=W.limit-t,!W.eq_s_b(1,"я")))return!1;case 2:W.slice_del()}return!0}return!1}function o(){return u(h,9)}function s(e,n){var r;return W.ket=W.cursor,!!(r=W.find_among_b(e,n))&&(W.bra=W.cursor,1==r&&W.slice_del(),!0)}function c(){return s(g,26)}function m(){return!!c()&&(u(C,8),!0)}function f(){return s(k,2)}function l(){return u(P,46)}function a(){s(v,36)}function p(){var e;W.ket=W.cursor,(e=W.find_among_b(F,2))&&(W.bra=W.cursor,i()&&1==e&&W.slice_del())}function d(){var e;if(W.ket=W.cursor,e=W.find_among_b(q,4))switch(W.bra=W.cursor,e){case 1:if(W.slice_del(),W.ket=W.cursor,!W.eq_s_b(1,"н"))break;W.bra=W.cursor;case 2:if(!W.eq_s_b(1,"н"))break;case 3:W.slice_del()}}var _,b,h=[new n("в",-1,1),new n("ив",0,2),new n("ыв",0,2),new n("вши",-1,1),new n("ивши",3,2),new n("ывши",3,2),new n("вшись",-1,1),new n("ившись",6,2),new n("ывшись",6,2)],g=[new n("ее",-1,1),new n("ие",-1,1),new n("ое",-1,1),new n("ые",-1,1),new n("ими",-1,1),new n("ыми",-1,1),new n("ей",-1,1),new n("ий",-1,1),new n("ой",-1,1),new 
n("ый",-1,1),new n("ем",-1,1),new n("им",-1,1),new n("ом",-1,1),new n("ым",-1,1),new n("его",-1,1),new n("ого",-1,1),new n("ему",-1,1),new n("ому",-1,1),new n("их",-1,1),new n("ых",-1,1),new n("ею",-1,1),new n("ою",-1,1),new n("ую",-1,1),new n("юю",-1,1),new n("ая",-1,1),new n("яя",-1,1)],C=[new n("ем",-1,1),new n("нн",-1,1),new n("вш",-1,1),new n("ивш",2,2),new n("ывш",2,2),new n("щ",-1,1),new n("ющ",5,1),new n("ующ",6,2)],k=[new n("сь",-1,1),new n("ся",-1,1)],P=[new n("ла",-1,1),new n("ила",0,2),new n("ыла",0,2),new n("на",-1,1),new n("ена",3,2),new n("ете",-1,1),new n("ите",-1,2),new n("йте",-1,1),new n("ейте",7,2),new n("уйте",7,2),new n("ли",-1,1),new n("или",10,2),new n("ыли",10,2),new n("й",-1,1),new n("ей",13,2),new n("уй",13,2),new n("л",-1,1),new n("ил",16,2),new n("ыл",16,2),new n("ем",-1,1),new n("им",-1,2),new n("ым",-1,2),new n("н",-1,1),new n("ен",22,2),new n("ло",-1,1),new n("ило",24,2),new n("ыло",24,2),new n("но",-1,1),new n("ено",27,2),new n("нно",27,1),new n("ет",-1,1),new n("ует",30,2),new n("ит",-1,2),new n("ыт",-1,2),new n("ют",-1,1),new n("уют",34,2),new n("ят",-1,2),new n("ны",-1,1),new n("ены",37,2),new n("ть",-1,1),new n("ить",39,2),new n("ыть",39,2),new n("ешь",-1,1),new n("ишь",-1,2),new n("ю",-1,2),new n("ую",44,2)],v=[new n("а",-1,1),new n("ев",-1,1),new n("ов",-1,1),new n("е",-1,1),new n("ие",3,1),new n("ье",3,1),new n("и",-1,1),new n("еи",6,1),new n("ии",6,1),new n("ами",6,1),new n("ями",6,1),new n("иями",10,1),new n("й",-1,1),new n("ей",12,1),new n("ией",13,1),new n("ий",12,1),new n("ой",12,1),new n("ам",-1,1),new n("ем",-1,1),new n("ием",18,1),new n("ом",-1,1),new n("ям",-1,1),new n("иям",21,1),new n("о",-1,1),new n("у",-1,1),new n("ах",-1,1),new n("ях",-1,1),new n("иях",26,1),new n("ы",-1,1),new n("ь",-1,1),new n("ю",-1,1),new n("ию",30,1),new n("ью",30,1),new n("я",-1,1),new n("ия",33,1),new n("ья",33,1)],F=[new n("ост",-1,1),new n("ость",-1,1)],q=[new n("ейше",-1,1),new n("н",-1,2),new n("ейш",-1,1),new 
n("ь",-1,3)],S=[33,65,8,232],W=new r;this.setCurrent=function(e){W.setCurrent(e)},this.getCurrent=function(){return W.getCurrent()},this.stem=function(){return w(),W.cursor=W.limit,!(W.cursor=i&&(e-=i,t[e>>3]&1<<(7&e)))return this.cursor++,!0}return!1},in_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e<=s&&e>=i&&(e-=i,t[e>>3]&1<<(7&e)))return this.cursor--,!0}return!1},out_grouping:function(t,i,s){if(this.cursors||e>3]&1<<(7&e)))return this.cursor++,!0}return!1},out_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e>s||e>3]&1<<(7&e)))return this.cursor--,!0}return!1},eq_s:function(t,i){if(this.limit-this.cursor>1),f=0,l=o0||e==s||c)break;c=!0}}for(;;){var _=t[s];if(o>=_.s_size){if(this.cursor=n+_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n+_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},find_among_b:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit_backward,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o=0;m--){if(n-l==u){f=-1;break}if(f=r.charCodeAt(n-1-l)-_.s[m])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){var _=t[s];if(o>=_.s_size){if(this.cursor=n-_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n-_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},replace_s:function(t,i,s){var e=s.length-(i-t),n=r.substring(0,t),u=r.substring(i);return r=n+s+u,this.limit+=e,this.cursor>=i?this.cursor+=e:this.cursor>t&&(this.cursor=t),e},slice_check:function(){if(this.bra<0||this.bra>this.ket||this.ket>this.limit||this.limit>r.length)throw"faulty slice operation"},slice_from:function(r){this.slice_check(),this.replace_s(this.bra,this.ket,r)},slice_del:function(){this.slice_from("")},insert:function(r,t,i){var s=this.replace_s(r,t,i);r<=this.bra&&(this.bra+=s),r<=this.ket&&(this.ket+=s)},slice_to:function(){return 
this.slice_check(),r.substring(this.bra,this.ket)},eq_v_b:function(r){return this.eq_s_b(r.length,r)}}}},r.trimmerSupport={generateTrimmer:function(r){var t=new RegExp("^[^"+r+"]+"),i=new RegExp("[^"+r+"]+$");return function(r){return"function"==typeof r.update?r.update(function(r){return r.replace(t,"").replace(i,"")}):r.replace(t,"").replace(i,"")}}}}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.sv.min.js b/1.3/assets/javascripts/lunr/min/lunr.sv.min.js new file mode 100644 index 00000000..3e5eb640 --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.sv.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Swedish` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.sv=function(){this.pipeline.reset(),this.pipeline.add(e.sv.trimmer,e.sv.stopWordFilter,e.sv.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.sv.stemmer))},e.sv.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.sv.trimmer=e.trimmerSupport.generateTrimmer(e.sv.wordCharacters),e.Pipeline.registerFunction(e.sv.trimmer,"trimmer-sv"),e.sv.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,t=new function(){function e(){var e,r=w.cursor+3;if(o=w.limit,0<=r||r<=w.limit){for(a=r;;){if(e=w.cursor,w.in_grouping(l,97,246)){w.cursor=e;break}if(w.cursor=e,w.cursor>=w.limit)return;w.cursor++}for(;!w.out_grouping(l,97,246);){if(w.cursor>=w.limit)return;w.cursor++}o=w.cursor,o=o&&(w.limit_backward=o,w.cursor=w.limit,w.ket=w.cursor,e=w.find_among_b(u,37),w.limit_backward=r,e))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:w.in_grouping_b(d,98,121)&&w.slice_del()}}function i(){var e=w.limit_backward;w.cursor>=o&&(w.limit_backward=o,w.cursor=w.limit,w.find_among_b(c,7)&&(w.cursor=w.limit,w.ket=w.cursor,w.cursor>w.limit_backward&&(w.bra=--w.cursor,w.slice_del())),w.limit_backward=e)}function s(){var e,r;if(w.cursor>=o){if(r=w.limit_backward,w.limit_backward=o,w.cursor=w.limit,w.ket=w.cursor,e=w.find_among_b(m,5))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:w.slice_from("lös");break;case 3:w.slice_from("full")}w.limit_backward=r}}var a,o,u=[new r("a",-1,1),new r("arna",0,1),new r("erna",0,1),new r("heterna",2,1),new r("orna",0,1),new r("ad",-1,1),new r("e",-1,1),new r("ade",6,1),new r("ande",6,1),new r("arne",6,1),new r("are",6,1),new r("aste",6,1),new r("en",-1,1),new r("anden",12,1),new r("aren",12,1),new r("heten",12,1),new r("ern",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",18,1),new r("or",-1,1),new r("s",-1,2),new r("as",21,1),new r("arnas",22,1),new 
r("ernas",22,1),new r("ornas",22,1),new r("es",21,1),new r("ades",26,1),new r("andes",26,1),new r("ens",21,1),new r("arens",29,1),new r("hetens",29,1),new r("erns",21,1),new r("at",-1,1),new r("andet",-1,1),new r("het",-1,1),new r("ast",-1,1)],c=[new r("dd",-1,-1),new r("gd",-1,-1),new r("nn",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1),new r("tt",-1,-1)],m=[new r("ig",-1,1),new r("lig",0,1),new r("els",-1,1),new r("fullt",-1,3),new r("löst",-1,2)],l=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,24,0,32],d=[119,127,149],w=new n;this.setCurrent=function(e){w.setCurrent(e)},this.getCurrent=function(){return w.getCurrent()},this.stem=function(){var r=w.cursor;return e(),w.limit_backward=r,w.cursor=w.limit,t(),w.cursor=w.limit,i(),w.cursor=w.limit,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return t.setCurrent(e),t.stem(),t.getCurrent()}):(t.setCurrent(e),t.stem(),t.getCurrent())}}(),e.Pipeline.registerFunction(e.sv.stemmer,"stemmer-sv"),e.sv.stopWordFilter=e.generateStopWordFilter("alla allt att av blev bli blir blivit de dem den denna deras dess dessa det detta dig din dina ditt du där då efter ej eller en er era ert ett från för ha hade han hans har henne hennes hon honom hur här i icke ingen inom inte jag ju kan kunde man med mellan men mig min mina mitt mot mycket ni nu när någon något några och om oss på samma sedan sig sin sina sitta själv skulle som så sådan sådana sådant till under upp ut utan vad var vara varför varit varje vars vart vem vi vid vilka vilkas vilken vilket vår våra vårt än är åt över".split(" ")),e.Pipeline.registerFunction(e.sv.stopWordFilter,"stopWordFilter-sv")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.tr.min.js b/1.3/assets/javascripts/lunr/min/lunr.tr.min.js new file mode 100644 index 00000000..563f6ec1 --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.tr.min.js @@ -0,0 +1,18 @@ +/*! 
+ * Lunr languages, `Turkish` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(r,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(r.lunr)}(this,function(){return function(r){if(void 0===r)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===r.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");r.tr=function(){this.pipeline.reset(),this.pipeline.add(r.tr.trimmer,r.tr.stopWordFilter,r.tr.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(r.tr.stemmer))},r.tr.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",r.tr.trimmer=r.trimmerSupport.generateTrimmer(r.tr.wordCharacters),r.Pipeline.registerFunction(r.tr.trimmer,"trimmer-tr"),r.tr.stemmer=function(){var i=r.stemmerSupport.Among,e=r.stemmerSupport.SnowballProgram,n=new function(){function r(r,i,e){for(;;){var n=Dr.limit-Dr.cursor;if(Dr.in_grouping_b(r,i,e)){Dr.cursor=Dr.limit-n;break}if(Dr.cursor=Dr.limit-n,Dr.cursor<=Dr.limit_backward)return!1;Dr.cursor--}return!0}function n(){var i,e;i=Dr.limit-Dr.cursor,r(Wr,97,305);for(var n=0;nDr.limit_backward&&(Dr.cursor--,e=Dr.limit-Dr.cursor,i()))?(Dr.cursor=Dr.limit-e,!0):(Dr.cursor=Dr.limit-n,r()?(Dr.cursor=Dr.limit-n,!1):(Dr.cursor=Dr.limit-n,!(Dr.cursor<=Dr.limit_backward)&&(Dr.cursor--,!!i()&&(Dr.cursor=Dr.limit-n,!0))))}function u(r){return t(r,function(){return Dr.in_grouping_b(Wr,97,305)})}function o(){return u(function(){return Dr.eq_s_b(1,"n")})}function s(){return u(function(){return Dr.eq_s_b(1,"s")})}function 
c(){return u(function(){return Dr.eq_s_b(1,"y")})}function l(){return t(function(){return Dr.in_grouping_b(Lr,105,305)},function(){return Dr.out_grouping_b(Wr,97,305)})}function a(){return Dr.find_among_b(ur,10)&&l()}function m(){return n()&&Dr.in_grouping_b(Lr,105,305)&&s()}function d(){return Dr.find_among_b(or,2)}function f(){return n()&&Dr.in_grouping_b(Lr,105,305)&&c()}function b(){return n()&&Dr.find_among_b(sr,4)}function w(){return n()&&Dr.find_among_b(cr,4)&&o()}function _(){return n()&&Dr.find_among_b(lr,2)&&c()}function k(){return n()&&Dr.find_among_b(ar,2)}function p(){return n()&&Dr.find_among_b(mr,4)}function g(){return n()&&Dr.find_among_b(dr,2)}function y(){return n()&&Dr.find_among_b(fr,4)}function z(){return n()&&Dr.find_among_b(br,2)}function v(){return n()&&Dr.find_among_b(wr,2)&&c()}function h(){return Dr.eq_s_b(2,"ki")}function q(){return n()&&Dr.find_among_b(_r,2)&&o()}function C(){return n()&&Dr.find_among_b(kr,4)&&c()}function P(){return n()&&Dr.find_among_b(pr,4)}function F(){return n()&&Dr.find_among_b(gr,4)&&c()}function S(){return Dr.find_among_b(yr,4)}function W(){return n()&&Dr.find_among_b(zr,2)}function L(){return n()&&Dr.find_among_b(vr,4)}function x(){return n()&&Dr.find_among_b(hr,8)}function A(){return Dr.find_among_b(qr,2)}function E(){return n()&&Dr.find_among_b(Cr,32)&&c()}function j(){return Dr.find_among_b(Pr,8)&&c()}function T(){return n()&&Dr.find_among_b(Fr,4)&&c()}function Z(){return Dr.eq_s_b(3,"ken")&&c()}function B(){var r=Dr.limit-Dr.cursor;return!(T()||(Dr.cursor=Dr.limit-r,E()||(Dr.cursor=Dr.limit-r,j()||(Dr.cursor=Dr.limit-r,Z()))))}function D(){if(A()){var r=Dr.limit-Dr.cursor;if(S()||(Dr.cursor=Dr.limit-r,W()||(Dr.cursor=Dr.limit-r,C()||(Dr.cursor=Dr.limit-r,P()||(Dr.cursor=Dr.limit-r,F()||(Dr.cursor=Dr.limit-r))))),T())return!1}return!0}function G(){if(W()){Dr.bra=Dr.cursor,Dr.slice_del();var r=Dr.limit-Dr.cursor;return 
Dr.ket=Dr.cursor,x()||(Dr.cursor=Dr.limit-r,E()||(Dr.cursor=Dr.limit-r,j()||(Dr.cursor=Dr.limit-r,T()||(Dr.cursor=Dr.limit-r)))),nr=!1,!1}return!0}function H(){if(!L())return!0;var r=Dr.limit-Dr.cursor;return!E()&&(Dr.cursor=Dr.limit-r,!j())}function I(){var r,i=Dr.limit-Dr.cursor;return!(S()||(Dr.cursor=Dr.limit-i,F()||(Dr.cursor=Dr.limit-i,P()||(Dr.cursor=Dr.limit-i,C()))))||(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,T()||(Dr.cursor=Dr.limit-r),!1)}function J(){var r,i=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,nr=!0,B()&&(Dr.cursor=Dr.limit-i,D()&&(Dr.cursor=Dr.limit-i,G()&&(Dr.cursor=Dr.limit-i,H()&&(Dr.cursor=Dr.limit-i,I()))))){if(Dr.cursor=Dr.limit-i,!x())return;Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,r=Dr.limit-Dr.cursor,S()||(Dr.cursor=Dr.limit-r,W()||(Dr.cursor=Dr.limit-r,C()||(Dr.cursor=Dr.limit-r,P()||(Dr.cursor=Dr.limit-r,F()||(Dr.cursor=Dr.limit-r))))),T()||(Dr.cursor=Dr.limit-r)}Dr.bra=Dr.cursor,Dr.slice_del()}function K(){var r,i,e,n;if(Dr.ket=Dr.cursor,h()){if(r=Dr.limit-Dr.cursor,p())return Dr.bra=Dr.cursor,Dr.slice_del(),i=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,W()?(Dr.bra=Dr.cursor,Dr.slice_del(),K()):(Dr.cursor=Dr.limit-i,a()&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()))),!0;if(Dr.cursor=Dr.limit-r,w()){if(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,e=Dr.limit-Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else{if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,!a()&&(Dr.cursor=Dr.limit-e,!m()&&(Dr.cursor=Dr.limit-e,!K())))return!0;Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())}return!0}if(Dr.cursor=Dr.limit-r,g()){if(n=Dr.limit-Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else if(Dr.cursor=Dr.limit-n,m())Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K());else if(Dr.cursor=Dr.limit-n,!K())return!1;return!0}}return!1}function 
M(r){if(Dr.ket=Dr.cursor,!g()&&(Dr.cursor=Dr.limit-r,!k()))return!1;var i=Dr.limit-Dr.cursor;if(d())Dr.bra=Dr.cursor,Dr.slice_del();else if(Dr.cursor=Dr.limit-i,m())Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K());else if(Dr.cursor=Dr.limit-i,!K())return!1;return!0}function N(r){if(Dr.ket=Dr.cursor,!z()&&(Dr.cursor=Dr.limit-r,!b()))return!1;var i=Dr.limit-Dr.cursor;return!(!m()&&(Dr.cursor=Dr.limit-i,!d()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()),!0)}function O(){var r,i=Dr.limit-Dr.cursor;return Dr.ket=Dr.cursor,!(!w()&&(Dr.cursor=Dr.limit-i,!v()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,!(!W()||(Dr.bra=Dr.cursor,Dr.slice_del(),!K()))||(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!(a()||(Dr.cursor=Dr.limit-r,m()||(Dr.cursor=Dr.limit-r,K())))||(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()),!0)))}function Q(){var r,i,e=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,!p()&&(Dr.cursor=Dr.limit-e,!f()&&(Dr.cursor=Dr.limit-e,!_())))return!1;if(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,r=Dr.limit-Dr.cursor,a())Dr.bra=Dr.cursor,Dr.slice_del(),i=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,W()||(Dr.cursor=Dr.limit-i);else if(Dr.cursor=Dr.limit-r,!W())return!0;return Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,K(),!0}function R(){var r,i,e=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,W())return Dr.bra=Dr.cursor,Dr.slice_del(),void K();if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,q())if(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else{if(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!a()&&(Dr.cursor=Dr.limit-r,!m())){if(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!W())return;if(Dr.bra=Dr.cursor,Dr.slice_del(),!K())return}Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())}else 
if(Dr.cursor=Dr.limit-e,!M(e)&&(Dr.cursor=Dr.limit-e,!N(e))){if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,y())return Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,i=Dr.limit-Dr.cursor,void(a()?(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())):(Dr.cursor=Dr.limit-i,W()?(Dr.bra=Dr.cursor,Dr.slice_del(),K()):(Dr.cursor=Dr.limit-i,K())));if(Dr.cursor=Dr.limit-e,!O()){if(Dr.cursor=Dr.limit-e,d())return Dr.bra=Dr.cursor,void Dr.slice_del();Dr.cursor=Dr.limit-e,K()||(Dr.cursor=Dr.limit-e,Q()||(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,(a()||(Dr.cursor=Dr.limit-e,m()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()))))}}}function U(){var r;if(Dr.ket=Dr.cursor,r=Dr.find_among_b(Sr,4))switch(Dr.bra=Dr.cursor,r){case 1:Dr.slice_from("p");break;case 2:Dr.slice_from("ç");break;case 3:Dr.slice_from("t");break;case 4:Dr.slice_from("k")}}function V(){for(;;){var r=Dr.limit-Dr.cursor;if(Dr.in_grouping_b(Wr,97,305)){Dr.cursor=Dr.limit-r;break}if(Dr.cursor=Dr.limit-r,Dr.cursor<=Dr.limit_backward)return!1;Dr.cursor--}return!0}function X(r,i,e){if(Dr.cursor=Dr.limit-r,V()){var n=Dr.limit-Dr.cursor;if(!Dr.eq_s_b(1,i)&&(Dr.cursor=Dr.limit-n,!Dr.eq_s_b(1,e)))return!0;Dr.cursor=Dr.limit-r;var t=Dr.cursor;return Dr.insert(Dr.cursor,Dr.cursor,e),Dr.cursor=t,!1}return!0}function Y(){var r=Dr.limit-Dr.cursor;(Dr.eq_s_b(1,"d")||(Dr.cursor=Dr.limit-r,Dr.eq_s_b(1,"g")))&&X(r,"a","ı")&&X(r,"e","i")&&X(r,"o","u")&&X(r,"ö","ü")}function $(){for(var r,i=Dr.cursor,e=2;;){for(r=Dr.cursor;!Dr.in_grouping(Wr,97,305);){if(Dr.cursor>=Dr.limit)return Dr.cursor=r,!(e>0)&&(Dr.cursor=i,!0);Dr.cursor++}e--}}function rr(r,i,e){for(;!Dr.eq_s(i,e);){if(Dr.cursor>=Dr.limit)return!0;Dr.cursor++}return(tr=i)!=Dr.limit||(Dr.cursor=r,!1)}function ir(){var r=Dr.cursor;return!rr(r,2,"ad")||(Dr.cursor=r,!rr(r,5,"soyad"))}function er(){var r=Dr.cursor;return!ir()&&(Dr.limit_backward=r,Dr.cursor=Dr.limit,Y(),Dr.cursor=Dr.limit,U(),!0)}var 
nr,tr,ur=[new i("m",-1,-1),new i("n",-1,-1),new i("miz",-1,-1),new i("niz",-1,-1),new i("muz",-1,-1),new i("nuz",-1,-1),new i("müz",-1,-1),new i("nüz",-1,-1),new i("mız",-1,-1),new i("nız",-1,-1)],or=[new i("leri",-1,-1),new i("ları",-1,-1)],sr=[new i("ni",-1,-1),new i("nu",-1,-1),new i("nü",-1,-1),new i("nı",-1,-1)],cr=[new i("in",-1,-1),new i("un",-1,-1),new i("ün",-1,-1),new i("ın",-1,-1)],lr=[new i("a",-1,-1),new i("e",-1,-1)],ar=[new i("na",-1,-1),new i("ne",-1,-1)],mr=[new i("da",-1,-1),new i("ta",-1,-1),new i("de",-1,-1),new i("te",-1,-1)],dr=[new i("nda",-1,-1),new i("nde",-1,-1)],fr=[new i("dan",-1,-1),new i("tan",-1,-1),new i("den",-1,-1),new i("ten",-1,-1)],br=[new i("ndan",-1,-1),new i("nden",-1,-1)],wr=[new i("la",-1,-1),new i("le",-1,-1)],_r=[new i("ca",-1,-1),new i("ce",-1,-1)],kr=[new i("im",-1,-1),new i("um",-1,-1),new i("üm",-1,-1),new i("ım",-1,-1)],pr=[new i("sin",-1,-1),new i("sun",-1,-1),new i("sün",-1,-1),new i("sın",-1,-1)],gr=[new i("iz",-1,-1),new i("uz",-1,-1),new i("üz",-1,-1),new i("ız",-1,-1)],yr=[new i("siniz",-1,-1),new i("sunuz",-1,-1),new i("sünüz",-1,-1),new i("sınız",-1,-1)],zr=[new i("lar",-1,-1),new i("ler",-1,-1)],vr=[new i("niz",-1,-1),new i("nuz",-1,-1),new i("nüz",-1,-1),new i("nız",-1,-1)],hr=[new i("dir",-1,-1),new i("tir",-1,-1),new i("dur",-1,-1),new i("tur",-1,-1),new i("dür",-1,-1),new i("tür",-1,-1),new i("dır",-1,-1),new i("tır",-1,-1)],qr=[new i("casına",-1,-1),new i("cesine",-1,-1)],Cr=[new i("di",-1,-1),new i("ti",-1,-1),new i("dik",-1,-1),new i("tik",-1,-1),new i("duk",-1,-1),new i("tuk",-1,-1),new i("dük",-1,-1),new i("tük",-1,-1),new i("dık",-1,-1),new i("tık",-1,-1),new i("dim",-1,-1),new i("tim",-1,-1),new i("dum",-1,-1),new i("tum",-1,-1),new i("düm",-1,-1),new i("tüm",-1,-1),new i("dım",-1,-1),new i("tım",-1,-1),new i("din",-1,-1),new i("tin",-1,-1),new i("dun",-1,-1),new i("tun",-1,-1),new i("dün",-1,-1),new i("tün",-1,-1),new i("dın",-1,-1),new i("tın",-1,-1),new i("du",-1,-1),new i("tu",-1,-1),new 
i("dü",-1,-1),new i("tü",-1,-1),new i("dı",-1,-1),new i("tı",-1,-1)],Pr=[new i("sa",-1,-1),new i("se",-1,-1),new i("sak",-1,-1),new i("sek",-1,-1),new i("sam",-1,-1),new i("sem",-1,-1),new i("san",-1,-1),new i("sen",-1,-1)],Fr=[new i("miş",-1,-1),new i("muş",-1,-1),new i("müş",-1,-1),new i("mış",-1,-1)],Sr=[new i("b",-1,1),new i("c",-1,2),new i("d",-1,3),new i("ğ",-1,4)],Wr=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,8,0,0,0,0,0,0,1],Lr=[1,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,1],xr=[1,64,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],Ar=[17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,130],Er=[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],jr=[17],Tr=[65],Zr=[65],Br=[["a",xr,97,305],["e",Ar,101,252],["ı",Er,97,305],["i",jr,101,105],["o",Tr,111,117],["ö",Zr,246,252],["u",Tr,111,117]],Dr=new e;this.setCurrent=function(r){Dr.setCurrent(r)},this.getCurrent=function(){return Dr.getCurrent()},this.stem=function(){return!!($()&&(Dr.limit_backward=Dr.cursor,Dr.cursor=Dr.limit,J(),Dr.cursor=Dr.limit,nr&&(R(),Dr.cursor=Dr.limit_backward,er())))}};return function(r){return"function"==typeof r.update?r.update(function(r){return n.setCurrent(r),n.stem(),n.getCurrent()}):(n.setCurrent(r),n.stem(),n.getCurrent())}}(),r.Pipeline.registerFunction(r.tr.stemmer,"stemmer-tr"),r.tr.stopWordFilter=r.generateStopWordFilter("acaba altmış altı ama ancak arada aslında ayrıca bana bazı belki ben benden beni benim beri beş bile bin bir biri birkaç birkez birçok birşey birşeyi biz bizden bize bizi bizim bu buna bunda bundan bunlar bunları bunların bunu bunun burada böyle böylece da daha dahi de defa değil diye diğer doksan dokuz dolayı dolayısıyla dört edecek eden ederek edilecek ediliyor edilmesi ediyor elli en etmesi etti ettiği ettiğini eğer gibi göre halen hangi hatta hem henüz hep hepsi her herhangi herkesin hiç hiçbir iki ile ilgili ise itibaren itibariyle için işte kadar karşın katrilyon kendi kendilerine kendini kendisi kendisine kendisini kez ki kim kimden kime kimi kimse 
kırk milyar milyon mu mü mı nasıl ne neden nedenle nerde nerede nereye niye niçin o olan olarak oldu olduklarını olduğu olduğunu olmadı olmadığı olmak olması olmayan olmaz olsa olsun olup olur olursa oluyor on ona ondan onlar onlardan onları onların onu onun otuz oysa pek rağmen sadece sanki sekiz seksen sen senden seni senin siz sizden sizi sizin tarafından trilyon tüm var vardı ve veya ya yani yapacak yapmak yaptı yaptıkları yaptığı yaptığını yapılan yapılması yapıyor yedi yerine yetmiş yine yirmi yoksa yüz zaten çok çünkü öyle üzere üç şey şeyden şeyi şeyler şu şuna şunda şundan şunları şunu şöyle".split(" ")),r.Pipeline.registerFunction(r.tr.stopWordFilter,"stopWordFilter-tr")}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/min/lunr.vi.min.js b/1.3/assets/javascripts/lunr/min/lunr.vi.min.js new file mode 100644 index 00000000..22aed28c --- /dev/null +++ b/1.3/assets/javascripts/lunr/min/lunr.vi.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.vi=function(){this.pipeline.reset(),this.pipeline.add(e.vi.stopWordFilter,e.vi.trimmer)},e.vi.wordCharacters="[A-Za-ẓ̀͐́͑̉̃̓ÂâÊêÔôĂ-ăĐ-đƠ-ơƯ-ư]",e.vi.trimmer=e.trimmerSupport.generateTrimmer(e.vi.wordCharacters),e.Pipeline.registerFunction(e.vi.trimmer,"trimmer-vi"),e.vi.stopWordFilter=e.generateStopWordFilter("là cái nhưng mà".split(" "))}}); \ No newline at end of file diff --git a/1.3/assets/javascripts/lunr/tinyseg.js b/1.3/assets/javascripts/lunr/tinyseg.js new file mode 100644 index 00000000..167fa6dd --- /dev/null +++ b/1.3/assets/javascripts/lunr/tinyseg.js @@ -0,0 +1,206 @@ +/** + * export the module via AMD, CommonJS or as a browser global + * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js + */ +;(function (root, factory) { + if (typeof define === 'function' && define.amd) { + // AMD. Register as an anonymous module. + define(factory) + } else if (typeof exports === 'object') { + /** + * Node. Does not work with strict CommonJS, but + * only CommonJS-like environments that support module.exports, + * like Node. + */ + module.exports = factory() + } else { + // Browser globals (root is window) + factory()(root.lunr); + } +}(this, function () { + /** + * Just return a value to define the module export. + * This example returns an object, but the module + * can return a function as the exported value. + */ + + return function(lunr) { + // TinySegmenter 0.1 -- Super compact Japanese tokenizer in Javascript + // (c) 2008 Taku Kudo + // TinySegmenter is freely distributable under the terms of a new BSD licence. 
+ // For details, see http://chasen.org/~taku/software/TinySegmenter/LICENCE.txt + + function TinySegmenter() { + var patterns = { + "[一二三四五六七八九十百千万億兆]":"M", + "[一-龠々〆ヵヶ]":"H", + "[ぁ-ん]":"I", + "[ァ-ヴーア-ン゙ー]":"K", + "[a-zA-Za-zA-Z]":"A", + "[0-90-9]":"N" + } + this.chartype_ = []; + for (var i in patterns) { + var regexp = new RegExp(i); + this.chartype_.push([regexp, patterns[i]]); + } + + this.BIAS__ = -332 + this.BC1__ = {"HH":6,"II":2461,"KH":406,"OH":-1378}; + this.BC2__ = {"AA":-3267,"AI":2744,"AN":-878,"HH":-4070,"HM":-1711,"HN":4012,"HO":3761,"IA":1327,"IH":-1184,"II":-1332,"IK":1721,"IO":5492,"KI":3831,"KK":-8741,"MH":-3132,"MK":3334,"OO":-2920}; + this.BC3__ = {"HH":996,"HI":626,"HK":-721,"HN":-1307,"HO":-836,"IH":-301,"KK":2762,"MK":1079,"MM":4034,"OA":-1652,"OH":266}; + this.BP1__ = {"BB":295,"OB":304,"OO":-125,"UB":352}; + this.BP2__ = {"BO":60,"OO":-1762}; + this.BQ1__ = {"BHH":1150,"BHM":1521,"BII":-1158,"BIM":886,"BMH":1208,"BNH":449,"BOH":-91,"BOO":-2597,"OHI":451,"OIH":-296,"OKA":1851,"OKH":-1020,"OKK":904,"OOO":2965}; + this.BQ2__ = {"BHH":118,"BHI":-1159,"BHM":466,"BIH":-919,"BKK":-1720,"BKO":864,"OHH":-1139,"OHM":-181,"OIH":153,"UHI":-1146}; + this.BQ3__ = {"BHH":-792,"BHI":2664,"BII":-299,"BKI":419,"BMH":937,"BMM":8335,"BNN":998,"BOH":775,"OHH":2174,"OHM":439,"OII":280,"OKH":1798,"OKI":-793,"OKO":-2242,"OMH":-2402,"OOO":11699}; + this.BQ4__ = {"BHH":-3895,"BIH":3761,"BII":-4654,"BIK":1348,"BKK":-1806,"BMI":-3385,"BOO":-12396,"OAH":926,"OHH":266,"OHK":-2036,"ONN":-973}; + this.BW1__ = 
{",と":660,",同":727,"B1あ":1404,"B1同":542,"、と":660,"、同":727,"」と":1682,"あっ":1505,"いう":1743,"いっ":-2055,"いる":672,"うし":-4817,"うん":665,"から":3472,"がら":600,"こう":-790,"こと":2083,"こん":-1262,"さら":-4143,"さん":4573,"した":2641,"して":1104,"すで":-3399,"そこ":1977,"それ":-871,"たち":1122,"ため":601,"った":3463,"つい":-802,"てい":805,"てき":1249,"でき":1127,"です":3445,"では":844,"とい":-4915,"とみ":1922,"どこ":3887,"ない":5713,"なっ":3015,"など":7379,"なん":-1113,"にし":2468,"には":1498,"にも":1671,"に対":-912,"の一":-501,"の中":741,"ませ":2448,"まで":1711,"まま":2600,"まる":-2155,"やむ":-1947,"よっ":-2565,"れた":2369,"れで":-913,"をし":1860,"を見":731,"亡く":-1886,"京都":2558,"取り":-2784,"大き":-2604,"大阪":1497,"平方":-2314,"引き":-1336,"日本":-195,"本当":-2423,"毎日":-2113,"目指":-724,"B1あ":1404,"B1同":542,"」と":1682}; + this.BW2__ = {"..":-11822,"11":-669,"――":-5730,"−−":-13175,"いう":-1609,"うか":2490,"かし":-1350,"かも":-602,"から":-7194,"かれ":4612,"がい":853,"がら":-3198,"きた":1941,"くな":-1597,"こと":-8392,"この":-4193,"させ":4533,"され":13168,"さん":-3977,"しい":-1819,"しか":-545,"した":5078,"して":972,"しな":939,"その":-3744,"たい":-1253,"たた":-662,"ただ":-3857,"たち":-786,"たと":1224,"たは":-939,"った":4589,"って":1647,"っと":-2094,"てい":6144,"てき":3640,"てく":2551,"ては":-3110,"ても":-3065,"でい":2666,"でき":-1528,"でし":-3828,"です":-4761,"でも":-4203,"とい":1890,"とこ":-1746,"とと":-2279,"との":720,"とみ":5168,"とも":-3941,"ない":-2488,"なが":-1313,"など":-6509,"なの":2614,"なん":3099,"にお":-1615,"にし":2748,"にな":2454,"によ":-7236,"に対":-14943,"に従":-4688,"に関":-11388,"のか":2093,"ので":-7059,"のに":-6041,"のの":-6125,"はい":1073,"はが":-1033,"はず":-2532,"ばれ":1813,"まし":-1316,"まで":-6621,"まれ":5409,"めて":-3153,"もい":2230,"もの":-10713,"らか":-944,"らし":-1611,"らに":-1897,"りし":651,"りま":1620,"れた":4270,"れて":849,"れば":4114,"ろう":6067,"われ":7901,"を通":-11877,"んだ":728,"んな":-4115,"一人":602,"一方":-1375,"一日":970,"一部":-1051,"上が":-4479,"会社":-1116,"出て":2163,"分の":-7758,"同党":970,"同日":-913,"大阪":-2471,"委員":-1250,"少な":-1050,"年度":-8669,"年間":-1626,"府県":-2363,"手権":-1982,"新聞":-4066,"日新":-722,"日本":-7068,"日米":3372,"曜日":-601,"朝鮮":-2355,"本人":-2697,"東京":-1543,"然と":-1384,"社会":-1276,"立て":-990,"第に":-1612,"米国":-4268,"11":-669}
; + this.BW3__ = {"あた":-2194,"あり":719,"ある":3846,"い.":-1185,"い。":-1185,"いい":5308,"いえ":2079,"いく":3029,"いた":2056,"いっ":1883,"いる":5600,"いわ":1527,"うち":1117,"うと":4798,"えと":1454,"か.":2857,"か。":2857,"かけ":-743,"かっ":-4098,"かに":-669,"から":6520,"かり":-2670,"が,":1816,"が、":1816,"がき":-4855,"がけ":-1127,"がっ":-913,"がら":-4977,"がり":-2064,"きた":1645,"けど":1374,"こと":7397,"この":1542,"ころ":-2757,"さい":-714,"さを":976,"し,":1557,"し、":1557,"しい":-3714,"した":3562,"して":1449,"しな":2608,"しま":1200,"す.":-1310,"す。":-1310,"する":6521,"ず,":3426,"ず、":3426,"ずに":841,"そう":428,"た.":8875,"た。":8875,"たい":-594,"たの":812,"たり":-1183,"たる":-853,"だ.":4098,"だ。":4098,"だっ":1004,"った":-4748,"って":300,"てい":6240,"てお":855,"ても":302,"です":1437,"でに":-1482,"では":2295,"とう":-1387,"とし":2266,"との":541,"とも":-3543,"どう":4664,"ない":1796,"なく":-903,"など":2135,"に,":-1021,"に、":-1021,"にし":1771,"にな":1906,"には":2644,"の,":-724,"の、":-724,"の子":-1000,"は,":1337,"は、":1337,"べき":2181,"まし":1113,"ます":6943,"まっ":-1549,"まで":6154,"まれ":-793,"らし":1479,"られ":6820,"るる":3818,"れ,":854,"れ、":854,"れた":1850,"れて":1375,"れば":-3246,"れる":1091,"われ":-605,"んだ":606,"んで":798,"カ月":990,"会議":860,"入り":1232,"大会":2217,"始め":1681,"市":965,"新聞":-5055,"日,":974,"日、":974,"社会":2024,"カ月":990}; + this.TC1__ = {"AAA":1093,"HHH":1029,"HHM":580,"HII":998,"HOH":-390,"HOM":-331,"IHI":1169,"IOH":-142,"IOI":-1015,"IOM":467,"MMH":187,"OOI":-1832}; + this.TC2__ = {"HHO":2088,"HII":-1023,"HMM":-1154,"IHI":-1965,"KKH":703,"OII":-2649}; + this.TC3__ = {"AAA":-294,"HHH":346,"HHI":-341,"HII":-1088,"HIK":731,"HOH":-1486,"IHH":128,"IHI":-3041,"IHO":-1935,"IIH":-825,"IIM":-1035,"IOI":-542,"KHH":-1216,"KKA":491,"KKH":-1217,"KOK":-1009,"MHH":-2694,"MHM":-457,"MHO":123,"MMH":-471,"NNH":-1689,"NNO":662,"OHO":-3393}; + this.TC4__ = {"HHH":-203,"HHI":1344,"HHK":365,"HHM":-122,"HHN":182,"HHO":669,"HIH":804,"HII":679,"HOH":446,"IHH":695,"IHO":-2324,"IIH":321,"III":1497,"IIO":656,"IOO":54,"KAK":4845,"KKA":3386,"KKK":3065,"MHH":-405,"MHI":201,"MMH":-241,"MMM":661,"MOM":841}; + this.TQ1__ = 
{"BHHH":-227,"BHHI":316,"BHIH":-132,"BIHH":60,"BIII":1595,"BNHH":-744,"BOHH":225,"BOOO":-908,"OAKK":482,"OHHH":281,"OHIH":249,"OIHI":200,"OIIH":-68}; + this.TQ2__ = {"BIHH":-1401,"BIII":-1033,"BKAK":-543,"BOOO":-5591}; + this.TQ3__ = {"BHHH":478,"BHHM":-1073,"BHIH":222,"BHII":-504,"BIIH":-116,"BIII":-105,"BMHI":-863,"BMHM":-464,"BOMH":620,"OHHH":346,"OHHI":1729,"OHII":997,"OHMH":481,"OIHH":623,"OIIH":1344,"OKAK":2792,"OKHH":587,"OKKA":679,"OOHH":110,"OOII":-685}; + this.TQ4__ = {"BHHH":-721,"BHHM":-3604,"BHII":-966,"BIIH":-607,"BIII":-2181,"OAAA":-2763,"OAKK":180,"OHHH":-294,"OHHI":2446,"OHHO":480,"OHIH":-1573,"OIHH":1935,"OIHI":-493,"OIIH":626,"OIII":-4007,"OKAK":-8156}; + this.TW1__ = {"につい":-4681,"東京都":2026}; + this.TW2__ = {"ある程":-2049,"いった":-1256,"ころが":-2434,"しょう":3873,"その後":-4430,"だって":-1049,"ていた":1833,"として":-4657,"ともに":-4517,"もので":1882,"一気に":-792,"初めて":-1512,"同時に":-8097,"大きな":-1255,"対して":-2721,"社会党":-3216}; + this.TW3__ = {"いただ":-1734,"してい":1314,"として":-4314,"につい":-5483,"にとっ":-5989,"に当た":-6247,"ので,":-727,"ので、":-727,"のもの":-600,"れから":-3752,"十二月":-2287}; + this.TW4__ = {"いう.":8576,"いう。":8576,"からな":-2348,"してい":2958,"たが,":1516,"たが、":1516,"ている":1538,"という":1349,"ました":5543,"ません":1097,"ようと":-4258,"よると":5865}; + this.UC1__ = {"A":484,"K":93,"M":645,"O":-505}; + this.UC2__ = {"A":819,"H":1059,"I":409,"M":3987,"N":5775,"O":646}; + this.UC3__ = {"A":-1370,"I":2311}; + this.UC4__ = {"A":-2643,"H":1809,"I":-1032,"K":-3450,"M":3565,"N":3876,"O":6646}; + this.UC5__ = {"H":313,"I":-1238,"K":-799,"M":539,"O":-831}; + this.UC6__ = {"H":-506,"I":-253,"K":87,"M":247,"O":-387}; + this.UP1__ = {"O":-214}; + this.UP2__ = {"B":69,"O":935}; + this.UP3__ = {"B":189}; + this.UQ1__ = {"BH":21,"BI":-12,"BK":-99,"BN":142,"BO":-56,"OH":-95,"OI":477,"OK":410,"OO":-2422}; + this.UQ2__ = {"BH":216,"BI":113,"OK":1759}; + this.UQ3__ = {"BA":-479,"BH":42,"BI":1913,"BK":-7198,"BM":3160,"BN":6427,"BO":14761,"OI":-827,"ON":-3212}; + this.UW1__ = 
{",":156,"、":156,"「":-463,"あ":-941,"う":-127,"が":-553,"き":121,"こ":505,"で":-201,"と":-547,"ど":-123,"に":-789,"の":-185,"は":-847,"も":-466,"や":-470,"よ":182,"ら":-292,"り":208,"れ":169,"を":-446,"ん":-137,"・":-135,"主":-402,"京":-268,"区":-912,"午":871,"国":-460,"大":561,"委":729,"市":-411,"日":-141,"理":361,"生":-408,"県":-386,"都":-718,"「":-463,"・":-135}; + this.UW2__ = {",":-829,"、":-829,"〇":892,"「":-645,"」":3145,"あ":-538,"い":505,"う":134,"お":-502,"か":1454,"が":-856,"く":-412,"こ":1141,"さ":878,"ざ":540,"し":1529,"す":-675,"せ":300,"そ":-1011,"た":188,"だ":1837,"つ":-949,"て":-291,"で":-268,"と":-981,"ど":1273,"な":1063,"に":-1764,"の":130,"は":-409,"ひ":-1273,"べ":1261,"ま":600,"も":-1263,"や":-402,"よ":1639,"り":-579,"る":-694,"れ":571,"を":-2516,"ん":2095,"ア":-587,"カ":306,"キ":568,"ッ":831,"三":-758,"不":-2150,"世":-302,"中":-968,"主":-861,"事":492,"人":-123,"会":978,"保":362,"入":548,"初":-3025,"副":-1566,"北":-3414,"区":-422,"大":-1769,"天":-865,"太":-483,"子":-1519,"学":760,"実":1023,"小":-2009,"市":-813,"年":-1060,"強":1067,"手":-1519,"揺":-1033,"政":1522,"文":-1355,"新":-1682,"日":-1815,"明":-1462,"最":-630,"朝":-1843,"本":-1650,"東":-931,"果":-665,"次":-2378,"民":-180,"気":-1740,"理":752,"発":529,"目":-1584,"相":-242,"県":-1165,"立":-763,"第":810,"米":509,"自":-1353,"行":838,"西":-744,"見":-3874,"調":1010,"議":1198,"込":3041,"開":1758,"間":-1257,"「":-645,"」":3145,"ッ":831,"ア":-587,"カ":306,"キ":568}; + this.UW3__ = 
{",":4889,"1":-800,"−":-1723,"、":4889,"々":-2311,"〇":5827,"」":2670,"〓":-3573,"あ":-2696,"い":1006,"う":2342,"え":1983,"お":-4864,"か":-1163,"が":3271,"く":1004,"け":388,"げ":401,"こ":-3552,"ご":-3116,"さ":-1058,"し":-395,"す":584,"せ":3685,"そ":-5228,"た":842,"ち":-521,"っ":-1444,"つ":-1081,"て":6167,"で":2318,"と":1691,"ど":-899,"な":-2788,"に":2745,"の":4056,"は":4555,"ひ":-2171,"ふ":-1798,"へ":1199,"ほ":-5516,"ま":-4384,"み":-120,"め":1205,"も":2323,"や":-788,"よ":-202,"ら":727,"り":649,"る":5905,"れ":2773,"わ":-1207,"を":6620,"ん":-518,"ア":551,"グ":1319,"ス":874,"ッ":-1350,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278,"・":-3794,"一":-1619,"下":-1759,"世":-2087,"両":3815,"中":653,"主":-758,"予":-1193,"二":974,"人":2742,"今":792,"他":1889,"以":-1368,"低":811,"何":4265,"作":-361,"保":-2439,"元":4858,"党":3593,"全":1574,"公":-3030,"六":755,"共":-1880,"円":5807,"再":3095,"分":457,"初":2475,"別":1129,"前":2286,"副":4437,"力":365,"動":-949,"務":-1872,"化":1327,"北":-1038,"区":4646,"千":-2309,"午":-783,"協":-1006,"口":483,"右":1233,"各":3588,"合":-241,"同":3906,"和":-837,"員":4513,"国":642,"型":1389,"場":1219,"外":-241,"妻":2016,"学":-1356,"安":-423,"実":-1008,"家":1078,"小":-513,"少":-3102,"州":1155,"市":3197,"平":-1804,"年":2416,"広":-1030,"府":1605,"度":1452,"建":-2352,"当":-3885,"得":1905,"思":-1291,"性":1822,"戸":-488,"指":-3973,"政":-2013,"教":-1479,"数":3222,"文":-1489,"新":1764,"日":2099,"旧":5792,"昨":-661,"時":-1248,"曜":-951,"最":-937,"月":4125,"期":360,"李":3094,"村":364,"東":-805,"核":5156,"森":2438,"業":484,"氏":2613,"民":-1694,"決":-1073,"法":1868,"海":-495,"無":979,"物":461,"特":-3850,"生":-273,"用":914,"町":1215,"的":7313,"直":-1835,"省":792,"県":6293,"知":-1528,"私":4231,"税":401,"立":-960,"第":1201,"米":7767,"系":3066,"約":3663,"級":1384,"統":-4229,"総":1163,"線":1255,"者":6457,"能":725,"自":-2869,"英":785,"見":1044,"調":-562,"財":-733,"費":1777,"車":1835,"軍":1375,"込":-1504,"通":-1136,"選":-681,"郎":1026,"郡":4404,"部":1200,"金":2163,"長":421,"開":-1432,"間":1302,"関":-1282,"雨":2009,"電":-1045,"非":2066,"駅":1620,"1":-800,"」":2670,"・":-3794,"ッ":-1350,"ア":551,"グ":1319,"ス":874,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278}; + this.UW4__ = 
{",":3930,".":3508,"―":-4841,"、":3930,"。":3508,"〇":4999,"「":1895,"」":3798,"〓":-5156,"あ":4752,"い":-3435,"う":-640,"え":-2514,"お":2405,"か":530,"が":6006,"き":-4482,"ぎ":-3821,"く":-3788,"け":-4376,"げ":-4734,"こ":2255,"ご":1979,"さ":2864,"し":-843,"じ":-2506,"す":-731,"ず":1251,"せ":181,"そ":4091,"た":5034,"だ":5408,"ち":-3654,"っ":-5882,"つ":-1659,"て":3994,"で":7410,"と":4547,"な":5433,"に":6499,"ぬ":1853,"ね":1413,"の":7396,"は":8578,"ば":1940,"ひ":4249,"び":-4134,"ふ":1345,"へ":6665,"べ":-744,"ほ":1464,"ま":1051,"み":-2082,"む":-882,"め":-5046,"も":4169,"ゃ":-2666,"や":2795,"ょ":-1544,"よ":3351,"ら":-2922,"り":-9726,"る":-14896,"れ":-2613,"ろ":-4570,"わ":-1783,"を":13150,"ん":-2352,"カ":2145,"コ":1789,"セ":1287,"ッ":-724,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637,"・":-4371,"ー":-11870,"一":-2069,"中":2210,"予":782,"事":-190,"井":-1768,"人":1036,"以":544,"会":950,"体":-1286,"作":530,"側":4292,"先":601,"党":-2006,"共":-1212,"内":584,"円":788,"初":1347,"前":1623,"副":3879,"力":-302,"動":-740,"務":-2715,"化":776,"区":4517,"協":1013,"参":1555,"合":-1834,"和":-681,"員":-910,"器":-851,"回":1500,"国":-619,"園":-1200,"地":866,"場":-1410,"塁":-2094,"士":-1413,"多":1067,"大":571,"子":-4802,"学":-1397,"定":-1057,"寺":-809,"小":1910,"屋":-1328,"山":-1500,"島":-2056,"川":-2667,"市":2771,"年":374,"庁":-4556,"後":456,"性":553,"感":916,"所":-1566,"支":856,"改":787,"政":2182,"教":704,"文":522,"方":-856,"日":1798,"時":1829,"最":845,"月":-9066,"木":-485,"来":-442,"校":-360,"業":-1043,"氏":5388,"民":-2716,"気":-910,"沢":-939,"済":-543,"物":-735,"率":672,"球":-1267,"生":-1286,"産":-1101,"田":-2900,"町":1826,"的":2586,"目":922,"省":-3485,"県":2997,"空":-867,"立":-2112,"第":788,"米":2937,"系":786,"約":2171,"経":1146,"統":-1169,"総":940,"線":-994,"署":749,"者":2145,"能":-730,"般":-852,"行":-792,"規":792,"警":-1184,"議":-244,"谷":-1000,"賞":730,"車":-1481,"軍":1158,"輪":-1433,"込":-3370,"近":929,"道":-1291,"選":2596,"郎":-4866,"都":1192,"野":-1100,"銀":-2213,"長":357,"間":-2344,"院":-2297,"際":-2604,"電":-878,"領":-1659,"題":-792,"館":-1984,"首":1749,"高":2120,"「":1895,"」":3798,"・":-4371,"ッ":-724,"ー":-11870,"カ":2145,"コ":1789,"セ":1287,"ト":-403,"メ":-1635,"ラ":-8
81,"リ":-541,"ル":-856,"ン":-3637}; + this.UW5__ = {",":465,".":-299,"1":-514,"E2":-32768,"]":-2762,"、":465,"。":-299,"「":363,"あ":1655,"い":331,"う":-503,"え":1199,"お":527,"か":647,"が":-421,"き":1624,"ぎ":1971,"く":312,"げ":-983,"さ":-1537,"し":-1371,"す":-852,"だ":-1186,"ち":1093,"っ":52,"つ":921,"て":-18,"で":-850,"と":-127,"ど":1682,"な":-787,"に":-1224,"の":-635,"は":-578,"べ":1001,"み":502,"め":865,"ゃ":3350,"ょ":854,"り":-208,"る":429,"れ":504,"わ":419,"を":-1264,"ん":327,"イ":241,"ル":451,"ン":-343,"中":-871,"京":722,"会":-1153,"党":-654,"務":3519,"区":-901,"告":848,"員":2104,"大":-1296,"学":-548,"定":1785,"嵐":-1304,"市":-2991,"席":921,"年":1763,"思":872,"所":-814,"挙":1618,"新":-1682,"日":218,"月":-4353,"査":932,"格":1356,"機":-1508,"氏":-1347,"田":240,"町":-3912,"的":-3149,"相":1319,"省":-1052,"県":-4003,"研":-997,"社":-278,"空":-813,"統":1955,"者":-2233,"表":663,"語":-1073,"議":1219,"選":-1018,"郎":-368,"長":786,"間":1191,"題":2368,"館":-689,"1":-514,"E2":-32768,"「":363,"イ":241,"ル":451,"ン":-343}; + this.UW6__ = {",":227,".":808,"1":-270,"E1":306,"、":227,"。":808,"あ":-307,"う":189,"か":241,"が":-73,"く":-121,"こ":-200,"じ":1782,"す":383,"た":-428,"っ":573,"て":-1014,"で":101,"と":-105,"な":-253,"に":-149,"の":-417,"は":-236,"も":-206,"り":187,"る":-135,"を":195,"ル":-673,"ン":-496,"一":-277,"中":201,"件":-800,"会":624,"前":302,"区":1792,"員":-1212,"委":798,"学":-960,"市":887,"広":-695,"後":535,"業":-697,"相":753,"社":-507,"福":974,"空":-822,"者":1811,"連":463,"郎":1082,"1":-270,"E1":306,"ル":-673,"ン":-496}; + + return this; + } + TinySegmenter.prototype.ctype_ = function(str) { + for (var i in this.chartype_) { + if (str.match(this.chartype_[i][0])) { + return this.chartype_[i][1]; + } + } + return "O"; + } + + TinySegmenter.prototype.ts_ = function(v) { + if (v) { return v; } + return 0; + } + + TinySegmenter.prototype.segment = function(input) { + if (input == null || input == undefined || input == "") { + return []; + } + var result = []; + var seg = ["B3","B2","B1"]; + var ctype = ["O","O","O"]; + var o = input.split(""); + for (i = 0; i < o.length; ++i) { + seg.push(o[i]); + 
ctype.push(this.ctype_(o[i])) + } + seg.push("E1"); + seg.push("E2"); + seg.push("E3"); + ctype.push("O"); + ctype.push("O"); + ctype.push("O"); + var word = seg[3]; + var p1 = "U"; + var p2 = "U"; + var p3 = "U"; + for (var i = 4; i < seg.length - 3; ++i) { + var score = this.BIAS__; + var w1 = seg[i-3]; + var w2 = seg[i-2]; + var w3 = seg[i-1]; + var w4 = seg[i]; + var w5 = seg[i+1]; + var w6 = seg[i+2]; + var c1 = ctype[i-3]; + var c2 = ctype[i-2]; + var c3 = ctype[i-1]; + var c4 = ctype[i]; + var c5 = ctype[i+1]; + var c6 = ctype[i+2]; + score += this.ts_(this.UP1__[p1]); + score += this.ts_(this.UP2__[p2]); + score += this.ts_(this.UP3__[p3]); + score += this.ts_(this.BP1__[p1 + p2]); + score += this.ts_(this.BP2__[p2 + p3]); + score += this.ts_(this.UW1__[w1]); + score += this.ts_(this.UW2__[w2]); + score += this.ts_(this.UW3__[w3]); + score += this.ts_(this.UW4__[w4]); + score += this.ts_(this.UW5__[w5]); + score += this.ts_(this.UW6__[w6]); + score += this.ts_(this.BW1__[w2 + w3]); + score += this.ts_(this.BW2__[w3 + w4]); + score += this.ts_(this.BW3__[w4 + w5]); + score += this.ts_(this.TW1__[w1 + w2 + w3]); + score += this.ts_(this.TW2__[w2 + w3 + w4]); + score += this.ts_(this.TW3__[w3 + w4 + w5]); + score += this.ts_(this.TW4__[w4 + w5 + w6]); + score += this.ts_(this.UC1__[c1]); + score += this.ts_(this.UC2__[c2]); + score += this.ts_(this.UC3__[c3]); + score += this.ts_(this.UC4__[c4]); + score += this.ts_(this.UC5__[c5]); + score += this.ts_(this.UC6__[c6]); + score += this.ts_(this.BC1__[c2 + c3]); + score += this.ts_(this.BC2__[c3 + c4]); + score += this.ts_(this.BC3__[c4 + c5]); + score += this.ts_(this.TC1__[c1 + c2 + c3]); + score += this.ts_(this.TC2__[c2 + c3 + c4]); + score += this.ts_(this.TC3__[c3 + c4 + c5]); + score += this.ts_(this.TC4__[c4 + c5 + c6]); + // score += this.ts_(this.TC5__[c4 + c5 + c6]); + score += this.ts_(this.UQ1__[p1 + c1]); + score += this.ts_(this.UQ2__[p2 + c2]); + score += this.ts_(this.UQ3__[p3 + c3]); + score += 
this.ts_(this.BQ1__[p2 + c2 + c3]); + score += this.ts_(this.BQ2__[p2 + c3 + c4]); + score += this.ts_(this.BQ3__[p3 + c2 + c3]); + score += this.ts_(this.BQ4__[p3 + c3 + c4]); + score += this.ts_(this.TQ1__[p2 + c1 + c2 + c3]); + score += this.ts_(this.TQ2__[p2 + c2 + c3 + c4]); + score += this.ts_(this.TQ3__[p3 + c1 + c2 + c3]); + score += this.ts_(this.TQ4__[p3 + c2 + c3 + c4]); + var p = "O"; + if (score > 0) { + result.push(word); + word = ""; + p = "B"; + } + p1 = p2; + p2 = p3; + p3 = p; + word += seg[i]; + } + result.push(word); + + return result; + } + + lunr.TinySegmenter = TinySegmenter; + }; + +})); \ No newline at end of file diff --git a/1.3/assets/javascripts/workers/search.df8cae7d.min.js b/1.3/assets/javascripts/workers/search.df8cae7d.min.js new file mode 100644 index 00000000..4197eccc --- /dev/null +++ b/1.3/assets/javascripts/workers/search.df8cae7d.min.js @@ -0,0 +1,59 @@ +(()=>{var ge=Object.create,U=Object.defineProperty,ye=Object.defineProperties,me=Object.getOwnPropertyDescriptor,ve=Object.getOwnPropertyDescriptors,xe=Object.getOwnPropertyNames,Y=Object.getOwnPropertySymbols,Se=Object.getPrototypeOf,G=Object.prototype.hasOwnProperty,Qe=Object.prototype.propertyIsEnumerable;var J=(t,e,r)=>e in t?U(t,e,{enumerable:!0,configurable:!0,writable:!0,value:r}):t[e]=r,j=(t,e)=>{for(var r in e||(e={}))G.call(e,r)&&J(t,r,e[r]);if(Y)for(var r of Y(e))Qe.call(e,r)&&J(t,r,e[r]);return t},X=(t,e)=>ye(t,ve(e)),be=t=>U(t,"__esModule",{value:!0});var Z=(t,e)=>()=>(e||t((e={exports:{}}).exports,e),e.exports);var we=(t,e,r)=>{if(e&&typeof e=="object"||typeof e=="function")for(let n of xe(e))!G.call(t,n)&&n!=="default"&&U(t,n,{get:()=>e[n],enumerable:!(r=me(e,n))||r.enumerable});return t},K=t=>we(be(U(t!=null?ge(Se(t)):{},"default",t&&t.__esModule&&"default"in t?{get:()=>t.default,enumerable:!0}:{value:t,enumerable:!0})),t);var W=(t,e,r)=>new Promise((n,i)=>{var 
s=u=>{try{a(r.next(u))}catch(c){i(c)}},o=u=>{try{a(r.throw(u))}catch(c){i(c)}},a=u=>u.done?n(u.value):Promise.resolve(u.value).then(s,o);a((r=r.apply(t,e)).next())});var re=Z((ee,te)=>{(function(){var t=function(e){var r=new t.Builder;return r.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),r.searchPipeline.add(t.stemmer),e.call(r,r),r.build()};t.version="2.3.9";t.utils={},t.utils.warn=function(e){return function(r){e.console&&console.warn&&console.warn(r)}}(this),t.utils.asString=function(e){return e==null?"":e.toString()},t.utils.clone=function(e){if(e==null)return e;for(var r=Object.create(null),n=Object.keys(e),i=0;i0){var h=t.utils.clone(r)||{};h.position=[a,c],h.index=s.length,s.push(new t.Token(n.slice(a,o),h))}a=o+1}}return s},t.tokenizer.separator=/[\s\-]+/;t.Pipeline=function(){this._stack=[]},t.Pipeline.registeredFunctions=Object.create(null),t.Pipeline.registerFunction=function(e,r){r in this.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+r),e.label=r,t.Pipeline.registeredFunctions[e.label]=e},t.Pipeline.warnIfFunctionNotRegistered=function(e){var r=e.label&&e.label in this.registeredFunctions;r||t.utils.warn(`Function is not registered with pipeline. This may cause problems when serialising the index. 
+`,e)},t.Pipeline.load=function(e){var r=new t.Pipeline;return e.forEach(function(n){var i=t.Pipeline.registeredFunctions[n];if(i)r.add(i);else throw new Error("Cannot load unregistered function: "+n)}),r},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(r){t.Pipeline.warnIfFunctionNotRegistered(r),this._stack.push(r)},this)},t.Pipeline.prototype.after=function(e,r){t.Pipeline.warnIfFunctionNotRegistered(r);var n=this._stack.indexOf(e);if(n==-1)throw new Error("Cannot find existingFn");n=n+1,this._stack.splice(n,0,r)},t.Pipeline.prototype.before=function(e,r){t.Pipeline.warnIfFunctionNotRegistered(r);var n=this._stack.indexOf(e);if(n==-1)throw new Error("Cannot find existingFn");this._stack.splice(n,0,r)},t.Pipeline.prototype.remove=function(e){var r=this._stack.indexOf(e);r!=-1&&this._stack.splice(r,1)},t.Pipeline.prototype.run=function(e){for(var r=this._stack.length,n=0;n1&&(oe&&(n=s),o!=e);)i=n-r,s=r+Math.floor(i/2),o=this.elements[s*2];if(o==e||o>e)return s*2;if(ou?h+=2:a==u&&(r+=n[c+1]*i[h+1],c+=2,h+=2);return r},t.Vector.prototype.similarity=function(e){return this.dot(e)/this.magnitude()||0},t.Vector.prototype.toArray=function(){for(var e=new Array(this.elements.length/2),r=1,n=0;r0){var o=s.str.charAt(0),a;o in s.node.edges?a=s.node.edges[o]:(a=new t.TokenSet,s.node.edges[o]=a),s.str.length==1&&(a.final=!0),i.push({node:a,editsRemaining:s.editsRemaining,str:s.str.slice(1)})}if(s.editsRemaining!=0){if("*"in s.node.edges)var u=s.node.edges["*"];else{var u=new t.TokenSet;s.node.edges["*"]=u}if(s.str.length==0&&(u.final=!0),i.push({node:u,editsRemaining:s.editsRemaining-1,str:s.str}),s.str.length>1&&i.push({node:s.node,editsRemaining:s.editsRemaining-1,str:s.str.slice(1)}),s.str.length==1&&(s.node.final=!0),s.str.length>=1){if("*"in s.node.edges)var c=s.node.edges["*"];else{var c=new 
t.TokenSet;s.node.edges["*"]=c}s.str.length==1&&(c.final=!0),i.push({node:c,editsRemaining:s.editsRemaining-1,str:s.str.slice(1)})}if(s.str.length>1){var h=s.str.charAt(0),y=s.str.charAt(1),g;y in s.node.edges?g=s.node.edges[y]:(g=new t.TokenSet,s.node.edges[y]=g),s.str.length==1&&(g.final=!0),i.push({node:g,editsRemaining:s.editsRemaining-1,str:h+s.str.slice(2)})}}}return n},t.TokenSet.fromString=function(e){for(var r=new t.TokenSet,n=r,i=0,s=e.length;i=e;r--){var n=this.uncheckedNodes[r],i=n.child.toString();i in this.minimizedNodes?n.parent.edges[n.char]=this.minimizedNodes[i]:(n.child._str=i,this.minimizedNodes[i]=n.child),this.uncheckedNodes.pop()}};t.Index=function(e){this.invertedIndex=e.invertedIndex,this.fieldVectors=e.fieldVectors,this.tokenSet=e.tokenSet,this.fields=e.fields,this.pipeline=e.pipeline},t.Index.prototype.search=function(e){return this.query(function(r){var n=new t.QueryParser(e,r);n.parse()})},t.Index.prototype.query=function(e){for(var r=new t.Query(this.fields),n=Object.create(null),i=Object.create(null),s=Object.create(null),o=Object.create(null),a=Object.create(null),u=0;u1?this._b=1:this._b=e},t.Builder.prototype.k1=function(e){this._k1=e},t.Builder.prototype.add=function(e,r){var n=e[this._ref],i=Object.keys(this._fields);this._documents[n]=r||{},this.documentCount+=1;for(var s=0;s=this.length)return t.QueryLexer.EOS;var e=this.str.charAt(this.pos);return this.pos+=1,e},t.QueryLexer.prototype.width=function(){return this.pos-this.start},t.QueryLexer.prototype.ignore=function(){this.start==this.pos&&(this.pos+=1),this.start=this.pos},t.QueryLexer.prototype.backup=function(){this.pos-=1},t.QueryLexer.prototype.acceptDigitRun=function(){var e,r;do e=this.next(),r=e.charCodeAt(0);while(r>47&&r<58);e!=t.QueryLexer.EOS&&this.backup()},t.QueryLexer.prototype.more=function(){return this.pos1&&(e.backup(),e.emit(t.QueryLexer.TERM)),e.ignore(),e.more())return t.QueryLexer.lexText},t.QueryLexer.lexEditDistance=function(e){return 
e.ignore(),e.acceptDigitRun(),e.emit(t.QueryLexer.EDIT_DISTANCE),t.QueryLexer.lexText},t.QueryLexer.lexBoost=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(t.QueryLexer.BOOST),t.QueryLexer.lexText},t.QueryLexer.lexEOS=function(e){e.width()>0&&e.emit(t.QueryLexer.TERM)},t.QueryLexer.termSeparator=t.tokenizer.separator,t.QueryLexer.lexText=function(e){for(;;){var r=e.next();if(r==t.QueryLexer.EOS)return t.QueryLexer.lexEOS;if(r.charCodeAt(0)==92){e.escapeCharacter();continue}if(r==":")return t.QueryLexer.lexField;if(r=="~")return e.backup(),e.width()>0&&e.emit(t.QueryLexer.TERM),t.QueryLexer.lexEditDistance;if(r=="^")return e.backup(),e.width()>0&&e.emit(t.QueryLexer.TERM),t.QueryLexer.lexBoost;if(r=="+"&&e.width()===1||r=="-"&&e.width()===1)return e.emit(t.QueryLexer.PRESENCE),t.QueryLexer.lexText;if(r.match(t.QueryLexer.termSeparator))return t.QueryLexer.lexTerm}},t.QueryParser=function(e,r){this.lexer=new t.QueryLexer(e),this.query=r,this.currentClause={},this.lexemeIdx=0},t.QueryParser.prototype.parse=function(){this.lexer.run(),this.lexemes=this.lexer.lexemes;for(var e=t.QueryParser.parseClause;e;)e=e(this);return this.query},t.QueryParser.prototype.peekLexeme=function(){return this.lexemes[this.lexemeIdx]},t.QueryParser.prototype.consumeLexeme=function(){var e=this.peekLexeme();return this.lexemeIdx+=1,e},t.QueryParser.prototype.nextClause=function(){var e=this.currentClause;this.query.clause(e),this.currentClause={}},t.QueryParser.parseClause=function(e){var r=e.peekLexeme();if(r!=null)switch(r.type){case t.QueryLexer.PRESENCE:return t.QueryParser.parsePresence;case t.QueryLexer.FIELD:return t.QueryParser.parseField;case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var n="expected either a field or a term, found "+r.type;throw r.str.length>=1&&(n+=" with value '"+r.str+"'"),new t.QueryParseError(n,r.start,r.end)}},t.QueryParser.parsePresence=function(e){var 
r=e.consumeLexeme();if(r!=null){switch(r.str){case"-":e.currentClause.presence=t.Query.presence.PROHIBITED;break;case"+":e.currentClause.presence=t.Query.presence.REQUIRED;break;default:var n="unrecognised presence operator'"+r.str+"'";throw new t.QueryParseError(n,r.start,r.end)}var i=e.peekLexeme();if(i==null){var n="expecting term or field, found nothing";throw new t.QueryParseError(n,r.start,r.end)}switch(i.type){case t.QueryLexer.FIELD:return t.QueryParser.parseField;case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var n="expecting term or field, found '"+i.type+"'";throw new t.QueryParseError(n,i.start,i.end)}}},t.QueryParser.parseField=function(e){var r=e.consumeLexeme();if(r!=null){if(e.query.allFields.indexOf(r.str)==-1){var n=e.query.allFields.map(function(o){return"'"+o+"'"}).join(", "),i="unrecognised field '"+r.str+"', possible fields: "+n;throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.fields=[r.str];var s=e.peekLexeme();if(s==null){var i="expecting term, found nothing";throw new t.QueryParseError(i,r.start,r.end)}switch(s.type){case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var i="expecting term, found '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},t.QueryParser.parseTerm=function(e){var r=e.consumeLexeme();if(r!=null){e.currentClause.term=r.str.toLowerCase(),r.str.indexOf("*")!=-1&&(e.currentClause.usePipeline=!1);var n=e.peekLexeme();if(n==null){e.nextClause();return}switch(n.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+n.type+"'";throw new t.QueryParseError(i,n.start,n.end)}}},t.QueryParser.parseEditDistance=function(e){var 
r=e.consumeLexeme();if(r!=null){var n=parseInt(r.str,10);if(isNaN(n)){var i="edit distance must be numeric";throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.editDistance=n;var s=e.peekLexeme();if(s==null){e.nextClause();return}switch(s.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},t.QueryParser.parseBoost=function(e){var r=e.consumeLexeme();if(r!=null){var n=parseInt(r.str,10);if(isNaN(n)){var i="boost must be numeric";throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.boost=n;var s=e.peekLexeme();if(s==null){e.nextClause();return}switch(s.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},function(e,r){typeof define=="function"&&define.amd?define(r):typeof ee=="object"?te.exports=r():e.lunr=r()}(this,function(){return t})})()});var ie=Z((Te,ne)=>{"use strict";var Le=/["'&<>]/;ne.exports=Ee;function Ee(t){var e=""+t,r=Le.exec(e);if(!r)return e;var n,i="",s=0,o=0;for(s=r.index;s`${i}${s}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${t.separator})(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(e,"|")})`,"img");return s=>s.replace(i,r).replace(/<\/mark>(\s+)]*>/img,"$1")}}function ue(t){let e=new 
lunr.Query(["title","text"]);return new lunr.QueryParser(t,e).parse(),e.clauses}function ce(t,e){let r=new Set(t),n={};for(let i=0;i!n.has(i)))]}var q=class{constructor({config:e,docs:r,index:n,options:i}){this.options=i,this.documents=oe(r),this.highlight=ae(e),lunr.tokenizer.separator=new RegExp(e.separator),typeof n=="undefined"?this.index=lunr(function(){e.lang.length===1&&e.lang[0]!=="en"?this.use(lunr[e.lang[0]]):e.lang.length>1&&this.use(lunr.multiLanguage(...e.lang));let s=ke(["trimmer","stopWordFilter","stemmer"],i.pipeline);for(let o of e.lang.map(a=>a==="en"?lunr:lunr[a]))for(let a of s)this.pipeline.remove(o[a]),this.searchPipeline.remove(o[a]);this.ref("location"),this.field("title",{boost:1e3}),this.field("text"),this.field("tags",{boost:1e6});for(let o of r)this.add(o,{boost:o.boost})}):this.index=lunr.Index.load(n)}search(e){if(e)try{let r=this.highlight(e),n=ue(e).filter(o=>o.presence!==lunr.Query.presence.PROHIBITED),i=this.index.search(`${e}*`).reduce((o,{ref:a,score:u,matchData:c})=>{let h=this.documents.get(a);if(typeof h!="undefined"){let{location:y,title:g,text:b,tags:m,parent:Q}=h,f=ce(n,Object.keys(c.metadata)),p=+!Q+ +Object.values(f).every(L=>L);o.push(X(j({location:y,title:r(g),text:r(b)},m&&{tags:m.map(r)}),{score:u*(1+p),terms:f}))}return o},[]).sort((o,a)=>a.score-o.score).reduce((o,a)=>{let u=this.documents.get(a.location);if(typeof u!="undefined"){let c="parent"in u?u.parent.location:u.location;o.set(c,[...o.get(c)||[],a])}return o},new Map),s;if(this.options.suggestions){let o=this.index.query(a=>{for(let u of n)a.term(u.term,{fields:["title"],presence:lunr.Query.presence.REQUIRED,wildcard:lunr.Query.wildcard.TRAILING})});s=o.length?Object.keys(o[0].matchData.metadata):[]}return j({items:[...i.values()]},typeof s!="undefined"&&{suggestions:s})}catch(r){console.warn(`Invalid query: ${e} \u2013 see https://bit.ly/2s3ChXG`)}return{items:[]}}};var 
T;(function(i){i[i.SETUP=0]="SETUP",i[i.READY=1]="READY",i[i.QUERY=2]="QUERY",i[i.RESULT=3]="RESULT"})(T||(T={}));var H;function Pe(t){return W(this,null,function*(){let e="../lunr";if(typeof parent!="undefined"&&"IFrameWorker"in parent){let n=document.querySelector("script[src]"),[i]=n.src.split("/worker");e=e.replace("..",i)}let r=[];for(let n of t.lang)n==="ja"&&r.push(`${e}/tinyseg.js`),n!=="en"&&r.push(`${e}/min/lunr.${n}.min.js`);t.lang.length>1&&r.push(`${e}/min/lunr.multi.min.js`),r.length&&(yield importScripts(`${e}/min/lunr.stemmer.support.min.js`,...r))})}function Ie(t){return W(this,null,function*(){switch(t.type){case T.SETUP:return yield Pe(t.data.config),H=new q(t.data),{type:T.READY};case T.QUERY:return{type:T.RESULT,data:H?H.search(t.data):{items:[]}};default:throw new TypeError("Invalid message type")}})}self.lunr=le.default;addEventListener("message",t=>W(void 0,null,function*(){postMessage(yield Ie(t.data))}));})(); +/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */ +/*! + * lunr.Builder + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.Index + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.Pipeline + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.Set + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.TokenSet + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.Vector + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.stemmer + * Copyright (C) 2020 Oliver Nightingale + * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt + */ +/*! + * lunr.stopWordFilter + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.tokenizer + * Copyright (C) 2020 Oliver Nightingale + */ +/*! + * lunr.trimmer + * Copyright (C) 2020 Oliver Nightingale + */ +/*! 
+ * lunr.utils + * Copyright (C) 2020 Oliver Nightingale + */ +/** + * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9 + * Copyright (C) 2020 Oliver Nightingale + * @license MIT + */ diff --git a/1.3/assets/stylesheets/main.92048cb8.min.css b/1.3/assets/stylesheets/main.92048cb8.min.css new file mode 100644 index 00000000..d4d59a00 --- /dev/null +++ b/1.3/assets/stylesheets/main.92048cb8.min.css @@ -0,0 +1 @@ +@charset "UTF-8";html{-webkit-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none;box-sizing:border-box}*,:after,:before{box-sizing:inherit}body{margin:0}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}hr{border:0;box-sizing:content-box;display:block;height:.05rem;overflow:visible;padding:0}small{font-size:80%}sub,sup{line-height:1em}img{border-style:none}table{border-collapse:separate;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{background:transparent;border:0;font-family:inherit;font-size:inherit;margin:0;padding:0}input{border:0;outline:none}:root{--md-default-fg-color:rgba(0,0,0,0.87);--md-default-fg-color--light:rgba(0,0,0,0.54);--md-default-fg-color--lighter:rgba(0,0,0,0.32);--md-default-fg-color--lightest:rgba(0,0,0,0.07);--md-default-bg-color:#fff;--md-default-bg-color--light:hsla(0,0%,100%,0.7);--md-default-bg-color--lighter:hsla(0,0%,100%,0.3);--md-default-bg-color--lightest:hsla(0,0%,100%,0.12);--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#303fa1;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7);--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:rgba(82,108,254,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}:root>*{--md-code-fg-color:#36464e;--md-code-bg-color:#f5f5f5;--md-code-hl-color:rgba(255,255,0,0.5);--md-code-hl-number-color:#d52a2a;--md-code-hl-special-color:#db1457;--md-code-hl-function-color:#a846
b9;--md-code-hl-constant-color:#6e59d9;--md-code-hl-keyword-color:#3f6ec6;--md-code-hl-string-color:#1c7d4d;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-default-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color);--md-typeset-mark-color:rgba(255,255,0,0.5);--md-typeset-del-color:rgba(245,80,61,0.15);--md-typeset-ins-color:rgba(11,213,112,0.15);--md-typeset-kbd-color:#fafafa;--md-typeset-kbd-accent-color:#fff;--md-typeset-kbd-border-color:#b8b8b8;--md-admonition-fg-color:var(--md-default-fg-color);--md-admonition-bg-color:var(--md-default-bg-color);--md-footer-fg-color:#fff;--md-footer-fg-color--light:hsla(0,0%,100%,0.7);--md-footer-fg-color--lighter:hsla(0,0%,100%,0.3);--md-footer-bg-color:rgba(0,0,0,0.87);--md-footer-bg-color--dark:rgba(0,0,0,0.32)}.md-icon svg{fill:currentColor;display:block;height:1.2rem;width:1.2rem}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}aside,body,input{font-feature-settings:"kern","liga";color:var(--md-typeset-color);font-family:var(--md-text-font-family,_),-apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif}code,kbd,pre{font-feature-settings:"kern";font-family:var(--md-code-font-family,_),SFMono-Regular,Consolas,Menlo,monospace}:root{--md-typeset-table--ascending:url('data:image/svg+xml;charset=utf-8,');--md-typeset-table--descending:url('data:image/svg+xml;charset=utf-8,')}.md-typeset{-webkit-print-color-adjust:exact;color-adjust:exact;font-size:.8rem;line-height:1.6}@media print{.md-typeset{font-size:.68rem}}.md-typeset blockquote,.md-typeset dl,.md-typeset figure,.md-typeset ol,.md-typeset pre,.md-typeset ul{margin:1em 0}.md-typeset 
h1{color:var(--md-default-fg-color--light);font-size:2em;line-height:1.3;margin:0 0 1.25em}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{font-size:1.5625em;line-height:1.4;margin:1.6em 0 .64em}.md-typeset h3{font-size:1.25em;font-weight:400;letter-spacing:-.01em;line-height:1.5;margin:1.6em 0 .8em}.md-typeset h2+h3{margin-top:.8em}.md-typeset h4{font-weight:700;letter-spacing:-.01em;margin:1em 0}.md-typeset h5,.md-typeset h6{color:var(--md-default-fg-color--light);font-size:.8em;font-weight:700;letter-spacing:-.01em;margin:1.25em 0}.md-typeset h5{text-transform:uppercase}.md-typeset hr{border-bottom:.05rem solid var(--md-default-fg-color--lightest);display:flow-root;margin:1.5em 0}.md-typeset a{color:var(--md-typeset-a-color);word-break:break-word}.md-typeset a,.md-typeset a:before{transition:color 125ms}.md-typeset a:focus,.md-typeset a:hover{color:var(--md-accent-fg-color)}.md-typeset a.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-typeset code,.md-typeset kbd,.md-typeset pre{color:var(--md-code-fg-color);direction:ltr}@media print{.md-typeset code,.md-typeset kbd,.md-typeset pre{white-space:pre-wrap}}.md-typeset code{background-color:var(--md-code-bg-color);border-radius:.1rem;-webkit-box-decoration-break:clone;box-decoration-break:clone;font-size:.85em;padding:0 .2941176471em;word-break:break-word}.md-typeset code:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{background-color:transparent;box-shadow:none;margin:initial;padding:initial}.md-typeset a code{color:currentColor}.md-typeset pre{display:flow-root;line-height:1.4;position:relative}.md-typeset pre>code{-webkit-box-decoration-break:slice;box-decoration-break:slice;box-shadow:none;display:block;margin:0;overflow:auto;padding:.7720588235em 
1.1764705882em;scrollbar-color:var(--md-default-fg-color--lighter) transparent;scrollbar-width:thin;touch-action:auto;word-break:normal}.md-typeset pre>code:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-typeset pre>code::-webkit-scrollbar{height:.2rem;width:.2rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}@media screen and (max-width:44.9375em){.md-content>.md-typeset>pre{margin:1em -.8rem}.md-content>.md-typeset>pre code{border-radius:0}}.md-typeset kbd{background-color:var(--md-typeset-kbd-color);border-radius:.1rem;box-shadow:0 .1rem 0 .05rem var(--md-typeset-kbd-border-color),0 .1rem 0 var(--md-typeset-kbd-border-color),0 -.1rem .2rem var(--md-typeset-kbd-accent-color) inset;color:var(--md-default-fg-color);display:inline-block;font-size:.75em;padding:0 .6666666667em;vertical-align:text-top;word-break:break-word}.md-typeset mark{background-color:var(--md-typeset-mark-color);-webkit-box-decoration-break:clone;box-decoration-break:clone;color:inherit;word-break:break-word}.md-typeset abbr{border-bottom:.05rem dotted var(--md-default-fg-color--light);cursor:help;text-decoration:none}@media (hover:none){.md-typeset abbr{position:relative}.md-typeset abbr[title]:focus:after,.md-typeset abbr[title]:hover:after{background-color:var(--md-default-fg-color);border-radius:.1rem;box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);color:var(--md-default-bg-color);content:attr(title);display:inline-block;font-size:.7rem;left:0;margin-top:2em;max-width:80%;min-width:-webkit-max-content;min-width:-moz-max-content;min-width:max-content;padding:.2rem .3rem;position:absolute;width:auto}}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.078125em}[dir=rtl] .md-typeset sub,[dir=rtl] .md-typeset sup{margin-left:0;margin-right:.078125em}.md-typeset 
blockquote{border-left:.2rem solid var(--md-default-fg-color--lighter);color:var(--md-default-fg-color--light);display:flow-root;padding-left:.6rem}[dir=rtl] .md-typeset blockquote{border-left:initial;border-right:.2rem solid var(--md-default-fg-color--lighter);padding-left:0;padding-right:.6rem}.md-typeset ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{display:flow-root;margin-left:.625em;padding:0}[dir=rtl] .md-typeset ol,[dir=rtl] .md-typeset ul{margin-left:0;margin-right:.625em}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}[dir=rtl] .md-typeset ol li,[dir=rtl] .md-typeset ul li{margin-left:0;margin-right:1.25em}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}[dir=rtl] .md-typeset ol li ol,[dir=rtl] .md-typeset ol li ul,[dir=rtl] .md-typeset ul li ol,[dir=rtl] .md-typeset ul li ul{margin-left:0;margin-right:.625em}.md-typeset dd{margin:1em 0 1.5em 1.875em}[dir=rtl] .md-typeset dd{margin-left:0;margin-right:1.875em}.md-typeset img,.md-typeset svg{height:auto;max-width:100%}.md-typeset img[align=left],.md-typeset svg[align=left]{margin:1em 1em 1em 0}.md-typeset img[align=right],.md-typeset svg[align=right]{margin:1em 0 1em 1em}.md-typeset img[align]:only-child,.md-typeset svg[align]:only-child{margin-top:0}.md-typeset figure{display:flow-root;margin:0 auto;max-width:100%;text-align:center;width:-webkit-fit-content;width:-moz-fit-content;width:fit-content}.md-typeset figure img{display:block}.md-typeset figcaption{font-style:italic;margin:1em auto 2em;max-width:24rem}.md-typeset iframe{max-width:100%}.md-typeset 
table:not([class]){background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .05rem rgba(0,0,0,.1);display:inline-block;font-size:.64rem;max-width:100%;overflow:auto;touch-action:auto}@media print{.md-typeset table:not([class]){display:table}}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td>:first-child,.md-typeset table:not([class]) th>:first-child{margin-top:0}.md-typeset table:not([class]) td>:last-child,.md-typeset table:not([class]) th>:last-child{margin-bottom:0}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}[dir=rtl] .md-typeset table:not([class]) td:not([align]),[dir=rtl] .md-typeset table:not([class]) th:not([align]){text-align:right}.md-typeset table:not([class]) th{background-color:var(--md-default-fg-color--light);color:var(--md-default-bg-color);min-width:5rem;padding:.9375em 1.25em;vertical-align:top}.md-typeset table:not([class]) th a{color:inherit}.md-typeset table:not([class]) td{border-top:.05rem solid var(--md-default-fg-color--lightest);padding:.9375em 1.25em;vertical-align:top}.md-typeset table:not([class]) tr{transition:background-color 125ms}.md-typeset table:not([class]) tr:hover{background-color:rgba(0,0,0,.04);box-shadow:0 .05rem 0 var(--md-default-bg-color) inset}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset table th[role=columnheader]{cursor:pointer}.md-typeset table th[role=columnheader]:after{content:"";display:inline-block;height:1.2em;margin-left:.5em;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;vertical-align:sub;width:1.2em}.md-typeset table th[role=columnheader][aria-sort=ascending]:after{background-color:currentColor;-webkit-mask-image:var(--md-typeset-table--ascending);mask-image:var(--md-typeset-table--ascending)}.md-typeset table 
th[role=columnheader][aria-sort=descending]:after{background-color:currentColor;-webkit-mask-image:var(--md-typeset-table--descending);mask-image:var(--md-typeset-table--descending)}.md-typeset__scrollwrap{margin:1em -.8rem;overflow-x:auto;touch-action:auto}.md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 .8rem}@media print{.md-typeset__table{display:block}}html .md-typeset__table table{display:table;margin:0;overflow:hidden;width:100%}html{font-size:125%;height:100%;overflow-x:hidden}@media screen and (min-width:100em){html{font-size:137.5%}}@media screen and (min-width:125em){html{font-size:150%}}body{background-color:var(--md-default-bg-color);display:flex;flex-direction:column;font-size:.5rem;min-height:100%;position:relative;width:100%}@media print{body{display:block}}@media screen and (max-width:59.9375em){body[data-md-state=lock]{position:fixed}}.md-grid{margin-left:auto;margin-right:auto;max-width:61rem}.md-container{display:flex;flex-direction:column;flex-grow:1}@media print{.md-container{display:block}}.md-main{flex-grow:1}.md-main__inner{display:flex;height:100%;margin-top:1.5rem}.md-ellipsis{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.md-toggle{display:none}.md-option{height:0;opacity:0;position:absolute;width:0}.md-option:checked+label:not([hidden]){display:block}.md-option.focus-visible+label{outline-color:var(--md-accent-fg-color);outline-style:auto}.md-skip{background-color:var(--md-default-fg-color);border-radius:.1rem;color:var(--md-default-bg-color);font-size:.64rem;margin:.5rem;opacity:0;outline-color:var(--md-accent-fg-color);padding:.3rem .5rem;position:fixed;transform:translateY(.4rem);z-index:-1}.md-skip:focus{opacity:1;transform:translateY(0);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity 175ms 75ms;z-index:10}@page{margin:25mm}.md-banner{background-color:var(--md-footer-bg-color);color:var(--md-footer-fg-color);overflow:auto}@media 
print{.md-banner{display:none}}.md-banner--warning{background:var(--md-typeset-mark-color);color:var(--md-default-fg-color)}.md-banner__inner{font-size:.7rem;margin:.6rem auto;padding:0 .8rem}:root{--md-clipboard-icon:url('data:image/svg+xml;charset=utf-8,')}.md-clipboard{border-radius:.1rem;color:var(--md-default-fg-color--lightest);cursor:pointer;height:1.5em;outline-color:var(--md-accent-fg-color);outline-offset:.1rem;position:absolute;right:.5em;top:.5em;transition:color .25s;width:1.5em;z-index:1}@media print{.md-clipboard{display:none}}.md-clipboard:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}:hover>.md-clipboard{color:var(--md-default-fg-color--light)}.md-clipboard:focus,.md-clipboard:hover{color:var(--md-accent-fg-color)}.md-clipboard:after{background-color:currentColor;content:"";display:block;height:1.125em;margin:0 auto;-webkit-mask-image:var(--md-clipboard-icon);mask-image:var(--md-clipboard-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:1.125em}.md-clipboard--inline{cursor:pointer}.md-clipboard--inline code{transition:color .25s,background-color .25s}.md-clipboard--inline:focus code,.md-clipboard--inline:hover code{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-content{flex-grow:1;min-width:0}.md-content__inner{margin:0 .8rem 1.2rem;padding-top:.6rem}@media screen and (min-width:76.25em){.md-sidebar--primary:not([hidden])~.md-content>.md-content__inner{margin-left:1.2rem}[dir=rtl] .md-sidebar--primary:not([hidden])~.md-content>.md-content__inner{margin-left:.8rem;margin-right:1.2rem}.md-sidebar--secondary:not([hidden])~.md-content>.md-content__inner{margin-right:1.2rem}[dir=rtl] 
.md-sidebar--secondary:not([hidden])~.md-content>.md-content__inner{margin-left:1.2rem;margin-right:.8rem}}.md-content__inner:before{content:"";display:block;height:.4rem}.md-content__inner>:last-child{margin-bottom:0}.md-content__button{float:right;margin:.4rem 0 .4rem .4rem;padding:0}@media print{.md-content__button{display:none}}[dir=rtl] .md-content__button{float:left;margin-left:0;margin-right:.4rem}[dir=rtl] .md-content__button svg{transform:scaleX(-1)}.md-typeset .md-content__button{color:var(--md-default-fg-color--lighter)}.md-content__button svg{display:inline;vertical-align:top}.md-dialog{background-color:var(--md-default-fg-color);border-radius:.1rem;bottom:.8rem;box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);left:auto;min-width:11.1rem;opacity:0;padding:.4rem .6rem;pointer-events:none;position:fixed;right:.8rem;transform:translateY(100%);transition:transform 0ms .4s,opacity .4s;z-index:3}@media print{.md-dialog{display:none}}[dir=rtl] .md-dialog{left:.8rem;right:auto}.md-dialog[data-md-state=open]{opacity:1;pointer-events:auto;transform:translateY(0);transition:transform .4s cubic-bezier(.075,.85,.175,1),opacity .4s}.md-dialog__inner{color:var(--md-default-bg-color);font-size:.7rem}.md-typeset .md-button{border:.1rem solid;border-radius:.1rem;color:var(--md-primary-fg-color);display:inline-block;font-weight:700;padding:.625em 2em;transition:color 125ms,background-color 125ms,border-color 125ms}.md-typeset .md-button--primary{background-color:var(--md-primary-fg-color);border-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color)}.md-typeset .md-button:focus,.md-typeset .md-button:hover{background-color:var(--md-accent-fg-color);border-color:var(--md-accent-fg-color);color:var(--md-accent-bg-color)}.md-typeset .md-input{border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 .025rem .05rem rgba(0,0,0,.1);font-size:.8rem;height:1.8rem;padding:0 .6rem;transition:box-shadow .25s}.md-typeset 
.md-input:focus,.md-typeset .md-input:hover{box-shadow:0 .4rem 1rem rgba(0,0,0,.15),0 .025rem .05rem rgba(0,0,0,.15)}.md-typeset .md-input--stretch{width:100%}.md-header{background-color:var(--md-primary-fg-color);box-shadow:0 0 .2rem transparent,0 .2rem .4rem transparent;color:var(--md-primary-bg-color);left:0;position:-webkit-sticky;position:sticky;right:0;top:0;z-index:3}@media print{.md-header{display:none}}.md-header[data-md-state=shadow]{box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2);transition:transform .25s cubic-bezier(.1,.7,.1,1),box-shadow .25s}.md-header[data-md-state=hidden]{transform:translateY(-100%);transition:transform .25s cubic-bezier(.8,0,.6,1),box-shadow .25s}.md-header__inner{align-items:center;display:flex;padding:0 .2rem}.md-header__button{color:currentColor;cursor:pointer;margin:.2rem;outline-color:var(--md-accent-fg-color);padding:.4rem;position:relative;transition:opacity .25s;vertical-align:middle;z-index:1}.md-header__button:hover{opacity:.7}.md-header__button:not([hidden]){display:inline-block}.md-header__button:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}.md-header__button.md-logo{margin:.2rem;padding:.4rem}@media screen and (max-width:76.1875em){.md-header__button.md-logo{display:none}}.md-header__button.md-logo img,.md-header__button.md-logo svg{fill:currentColor;display:block;height:1.2rem;width:1.2rem}@media screen and (min-width:60em){.md-header__button[for=__search]{display:none}}.no-js .md-header__button[for=__search]{display:none}[dir=rtl] .md-header__button[for=__search] svg{transform:scaleX(-1)}@media screen and (min-width:76.25em){.md-header__button[for=__drawer]{display:none}}.md-header__topic{display:flex;max-width:100%;position:absolute;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s}.md-header__topic+.md-header__topic{opacity:0;pointer-events:none;transform:translateX(1.25rem);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;z-index:-1}[dir=rtl] 
.md-header__topic+.md-header__topic{transform:translateX(-1.25rem)}.md-header__title{flex-grow:1;font-size:.9rem;height:2.4rem;line-height:2.4rem;margin-left:1rem;margin-right:.4rem}.md-header__title[data-md-state=active] .md-header__topic{opacity:0;pointer-events:none;transform:translateX(-1.25rem);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;z-index:-1}[dir=rtl] .md-header__title[data-md-state=active] .md-header__topic{transform:translateX(1.25rem)}.md-header__title[data-md-state=active] .md-header__topic+.md-header__topic{opacity:1;pointer-events:auto;transform:translateX(0);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;z-index:0}.md-header__title>.md-header__ellipsis{height:100%;position:relative;width:100%}.md-header__option{display:flex;flex-shrink:0;max-width:100%;transition:max-width 0ms .25s,opacity .25s .25s;white-space:nowrap}[data-md-toggle=search]:checked~.md-header .md-header__option{max-width:0;opacity:0;transition:max-width 0ms,opacity 0ms}.md-header__source{display:none}@media screen and (min-width:60em){.md-header__source{display:block;margin-left:1rem;max-width:11.7rem;width:11.7rem}[dir=rtl] .md-header__source{margin-left:0;margin-right:1rem}}@media screen and (min-width:76.25em){.md-header__source{margin-left:1.4rem}[dir=rtl] .md-header__source{margin-right:1.4rem}}.md-footer{background-color:var(--md-footer-bg-color);color:var(--md-footer-fg-color)}@media print{.md-footer{display:none}}.md-footer__inner{overflow:auto;padding:.2rem}.md-footer__link{display:flex;outline-color:var(--md-accent-fg-color);padding-bottom:.4rem;padding-top:1.4rem;transition:opacity .25s}@media screen and (min-width:45em){.md-footer__link{width:50%}}.md-footer__link:focus,.md-footer__link:hover{opacity:.7}.md-footer__link--prev{float:left}@media screen and (max-width:44.9375em){.md-footer__link--prev{width:25%}.md-footer__link--prev .md-footer__title{display:none}}[dir=rtl] .md-footer__link--prev{float:right}[dir=rtl] 
.md-footer__link--prev svg{transform:scaleX(-1)}.md-footer__link--next{float:right;text-align:right}@media screen and (max-width:44.9375em){.md-footer__link--next{width:75%}}[dir=rtl] .md-footer__link--next{float:left;text-align:left}[dir=rtl] .md-footer__link--next svg{transform:scaleX(-1)}.md-footer__title{flex-grow:1;font-size:.9rem;line-height:2.4rem;max-width:calc(100% - 2.4rem);padding:0 1rem;position:relative}.md-footer__button{margin:.2rem;padding:.4rem}.md-footer__direction{font-size:.64rem;left:0;margin-top:-1rem;opacity:.7;padding:0 1rem;position:absolute;right:0}.md-footer-meta{background-color:var(--md-footer-bg-color--dark)}.md-footer-meta__inner{display:flex;flex-wrap:wrap;justify-content:space-between;padding:.2rem}html .md-footer-meta.md-typeset a{color:var(--md-footer-fg-color--light)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:var(--md-footer-fg-color)}.md-footer-copyright{color:var(--md-footer-fg-color--lighter);font-size:.64rem;margin:auto .6rem;padding:.4rem 0;width:100%}@media screen and (min-width:45em){.md-footer-copyright{width:auto}}.md-footer-copyright__highlight{color:var(--md-footer-fg-color--light)}.md-footer-social{margin:0 .4rem;padding:.2rem 0 .6rem}@media screen and (min-width:45em){.md-footer-social{padding:.6rem 0}}.md-footer-social__link{display:inline-block;height:1.6rem;text-align:center;width:1.6rem}.md-footer-social__link:before{line-height:1.9}.md-footer-social__link svg{fill:currentColor;max-height:.8rem;vertical-align:-25%}:root{--md-nav-icon--prev:url('data:image/svg+xml;charset=utf-8,');--md-nav-icon--next:url('data:image/svg+xml;charset=utf-8,');--md-toc-icon:url('data:image/svg+xml;charset=utf-8,')}.md-nav{font-size:.7rem;line-height:1.3}.md-nav__title{display:block;font-weight:700;overflow:hidden;padding:0 .6rem;text-overflow:ellipsis}.md-nav__title .md-nav__button{display:none}.md-nav__title .md-nav__button img{height:100%;width:auto}.md-nav__title .md-nav__button.md-logo 
img,.md-nav__title .md-nav__button.md-logo svg{fill:currentColor;display:block;height:2.4rem;width:2.4rem}.md-nav__list{margin:0;padding:0}.md-nav__item{display:block;padding:0 .6rem}.md-nav__item .md-nav__item{padding-right:0}[dir=rtl] .md-nav__item .md-nav__item{padding-left:0;padding-right:.6rem}.md-nav__link{cursor:pointer;display:block;margin-top:.625em;overflow:hidden;scroll-snap-align:start;text-overflow:ellipsis;transition:color 125ms}.md-nav__link[data-md-state=blur]{color:var(--md-default-fg-color--light)}.md-nav__link--container{display:flex}.md-nav__link--container>:first-child{flex-grow:1}.md-nav__link--container>*{cursor:inherit}.md-nav__item .md-nav__link--active{color:var(--md-typeset-a-color)}.md-nav__link:focus,.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav__link.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-nav--primary .md-nav__link[for=__toc]{display:none}.md-nav--primary .md-nav__link[for=__toc] .md-icon:after{background-color:currentColor;display:block;height:100%;-webkit-mask-image:var(--md-toc-icon);mask-image:var(--md-toc-icon);width:100%}.md-nav--primary .md-nav__link[for=__toc]~.md-nav{display:none}.md-nav__source{display:none}@media screen and (max-width:76.1875em){.md-nav--primary,.md-nav--primary .md-nav{background-color:var(--md-default-bg-color);display:flex;flex-direction:column;height:100%;left:0;position:absolute;right:0;top:0;z-index:1}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:.8rem;line-height:1.5}.md-nav--primary .md-nav__title{background-color:var(--md-default-fg-color--lightest);color:var(--md-default-fg-color--light);cursor:pointer;font-weight:400;height:5.6rem;line-height:2.4rem;padding:3rem .8rem .2rem;position:relative;white-space:nowrap}.md-nav--primary .md-nav__title .md-nav__icon{display:block;height:1.2rem;left:.4rem;margin:.2rem;position:absolute;top:.4rem;width:1.2rem}[dir=rtl] .md-nav--primary .md-nav__title 
.md-nav__icon{left:auto;right:.4rem}.md-nav--primary .md-nav__title .md-nav__icon:after{background-color:currentColor;content:"";display:block;height:100%;-webkit-mask-image:var(--md-nav-icon--prev);mask-image:var(--md-nav-icon--prev);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}.md-nav--primary .md-nav__title~.md-nav__list{background-color:var(--md-default-bg-color);box-shadow:0 .05rem 0 var(--md-default-fg-color--lightest) inset;overflow-y:auto;-ms-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory;touch-action:pan-y}.md-nav--primary .md-nav__title~.md-nav__list>:first-child{border-top:0}.md-nav--primary .md-nav__title[for=__drawer]{background-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color)}.md-nav--primary .md-nav__title .md-logo{display:block;left:.2rem;margin:.2rem;padding:.4rem;position:absolute;top:.2rem}[dir=rtl] .md-nav--primary .md-nav__title .md-logo{left:auto;right:.2rem}.md-nav--primary .md-nav__list{flex:1}.md-nav--primary .md-nav__item{border-top:.05rem solid var(--md-default-fg-color--lightest);padding:0}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:2.4rem}[dir=rtl] .md-nav--primary .md-nav__item--nested>.md-nav__link{padding-left:2.4rem;padding-right:.8rem}.md-nav--primary .md-nav__item--active>.md-nav__link{color:var(--md-typeset-a-color)}.md-nav--primary .md-nav__item--active>.md-nav__link:focus,.md-nav--primary .md-nav__item--active>.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav--primary .md-nav__link{margin-top:0;padding:.6rem .8rem;position:relative}.md-nav--primary .md-nav__link .md-nav__icon{color:inherit;font-size:1.2rem;height:1.2rem;margin-top:-.6rem;position:absolute;right:.6rem;top:50%;width:1.2rem}[dir=rtl] .md-nav--primary .md-nav__link .md-nav__icon{left:.6rem;right:auto}.md-nav--primary .md-nav__link 
.md-nav__icon:after{background-color:currentColor;content:"";display:block;height:100%;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}[dir=rtl] .md-nav--primary .md-nav__icon:after{transform:scale(-1)}.md-nav--primary .md-nav--secondary .md-nav__link{position:static}.md-nav--primary .md-nav--secondary .md-nav{background-color:transparent;position:static}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:1.4rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:0;padding-right:1.4rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:0;padding-right:2rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:2.6rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:0;padding-right:2.6rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:3.2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:0;padding-right:3.2rem}.md-nav--secondary{background-color:transparent}.md-nav__toggle~.md-nav{display:flex;opacity:0;transform:translateX(100%);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity 125ms 50ms}[dir=rtl] .md-nav__toggle~.md-nav{transform:translateX(-100%)}.md-nav__toggle:checked~.md-nav{opacity:1;transform:translateX(0);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity 125ms 125ms}.md-nav__toggle:checked~.md-nav>.md-nav__list{-webkit-backface-visibility:hidden;backface-visibility:hidden}}@media screen and (max-width:59.9375em){.md-nav--primary .md-nav__link[for=__toc]{display:block;padding-right:2.4rem}[dir=rtl] .md-nav--primary 
.md-nav__link[for=__toc]{padding-left:2.4rem;padding-right:.8rem}.md-nav--primary .md-nav__link[for=__toc] .md-icon:after{content:""}.md-nav--primary .md-nav__link[for=__toc]+.md-nav__link{display:none}.md-nav--primary .md-nav__link[for=__toc]~.md-nav{display:flex}.md-nav__source{background-color:var(--md-primary-fg-color--dark);color:var(--md-primary-bg-color);display:block;padding:0 .2rem}}@media screen and (min-width:60em) and (max-width:76.1875em){.md-nav--integrated .md-nav__link[for=__toc]{display:block;padding-right:2.4rem;scroll-snap-align:none}[dir=rtl] .md-nav--integrated .md-nav__link[for=__toc]{padding-left:2.4rem;padding-right:.8rem}.md-nav--integrated .md-nav__link[for=__toc] .md-icon:after{content:""}.md-nav--integrated .md-nav__link[for=__toc]+.md-nav__link{display:none}.md-nav--integrated .md-nav__link[for=__toc]~.md-nav{display:flex}}@media screen and (min-width:60em){.md-nav--secondary .md-nav__title{background:var(--md-default-bg-color);box-shadow:0 0 .4rem .4rem var(--md-default-bg-color);position:-webkit-sticky;position:sticky;top:0}.md-nav--secondary .md-nav__title[for=__toc]{scroll-snap-align:start}.md-nav--secondary .md-nav__title .md-nav__icon{display:none}}@media screen and (min-width:76.25em){.md-nav{transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav--primary .md-nav__title{background:var(--md-default-bg-color);box-shadow:0 0 .4rem .4rem var(--md-default-bg-color);position:-webkit-sticky;position:sticky;top:0}.md-nav--primary .md-nav__title[for=__drawer]{scroll-snap-align:start}.md-nav--primary .md-nav__title .md-nav__icon{display:none}.md-nav__toggle~.md-nav{display:none}.md-nav__toggle:checked~.md-nav,.md-nav__toggle:indeterminate~.md-nav{display:block}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--section{display:block;margin:1.25em 
0}.md-nav__item--section:last-child{margin-bottom:0}.md-nav__item--section>.md-nav__link{font-weight:700;pointer-events:none}.md-nav__item--section>.md-nav__link>*{pointer-events:auto}.md-nav__item--section>.md-nav__link .md-icon{display:none}.md-nav__item--section>.md-nav{display:block}.md-nav__item--section>.md-nav>.md-nav__list>.md-nav__item{padding:0}.md-nav__icon{float:right;height:.9rem;transition:transform .25s;width:.9rem}[dir=rtl] .md-nav__icon{float:left;transform:rotate(180deg)}.md-nav__icon:after{background-color:currentColor;content:"";display:inline-block;height:100%;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;vertical-align:-.1rem;width:100%}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link .md-nav__icon,.md-nav__item--nested .md-nav__toggle:indeterminate~.md-nav__link .md-nav__icon{transform:rotate(90deg)}.md-nav--lifted>.md-nav__list>.md-nav__item--nested,.md-nav--lifted>.md-nav__title{display:none}.md-nav--lifted>.md-nav__list>.md-nav__item{display:none}.md-nav--lifted>.md-nav__list>.md-nav__item--active{display:block;padding:0}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav__link{display:none}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav>.md-nav__title{display:block;padding:0 .6rem;pointer-events:none;scroll-snap-align:start}.md-nav--lifted .md-nav[data-md-level="1"]{display:block}.md-nav--lifted .md-nav[data-md-level="1"]>.md-nav__list>.md-nav__item{padding-right:.6rem}.md-nav--integrated .md-nav__link[for=__toc]~.md-nav{border-left:.05rem solid var(--md-primary-fg-color);display:block;margin-bottom:1.25em}.md-nav--integrated .md-nav__link[for=__toc]~.md-nav>.md-nav__title{display:none}}:root{--md-search-result-icon:url('data:image/svg+xml;charset=utf-8,')}.md-search{position:relative}@media screen and (min-width:60em){.md-search{padding:.2rem 0}}.no-js 
.md-search{display:none}.md-search__overlay{opacity:0;z-index:1}@media screen and (max-width:59.9375em){.md-search__overlay{background-color:var(--md-default-bg-color);border-radius:1rem;height:2rem;left:-2.2rem;overflow:hidden;pointer-events:none;position:absolute;top:.2rem;transform-origin:center;transition:transform .3s .1s,opacity .2s .2s;width:2rem}[dir=rtl] .md-search__overlay{left:auto;right:-2.2rem}[data-md-toggle=search]:checked~.md-header .md-search__overlay{opacity:1;transition:transform .4s,opacity .1s}}@media screen and (min-width:60em){.md-search__overlay{background-color:rgba(0,0,0,.54);cursor:pointer;height:0;left:0;position:fixed;top:0;transition:width 0ms .25s,height 0ms .25s,opacity .25s;width:0}[dir=rtl] .md-search__overlay{left:auto;right:0}[data-md-toggle=search]:checked~.md-header .md-search__overlay{height:200vh;opacity:1;transition:width 0ms,height 0ms,opacity .25s;width:100%}}@media screen and (max-width:29.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(45)}}@media screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(60)}}@media screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(75)}}.md-search__inner{-webkit-backface-visibility:hidden;backface-visibility:hidden}@media screen and (max-width:59.9375em){.md-search__inner{height:100%;left:100%;opacity:0;position:fixed;top:0;transform:translateX(5%);transition:right 0ms .3s,left 0ms .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;width:100%;z-index:2}[data-md-toggle=search]:checked~.md-header .md-search__inner{left:0;opacity:1;transform:translateX(0);transition:right 0ms 0ms,left 0ms 0ms,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s}[dir=rtl] [data-md-toggle=search]:checked~.md-header .md-search__inner{left:auto;right:0}html [dir=rtl] 
.md-search__inner{left:auto;right:100%;transform:translateX(-5%)}}@media screen and (min-width:60em){.md-search__inner{float:right;padding:.1rem 0;position:relative;transition:width .25s cubic-bezier(.1,.7,.1,1);width:11.7rem}[dir=rtl] .md-search__inner{float:left}}@media screen and (min-width:60em) and (max-width:76.1875em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:23.4rem}}@media screen and (min-width:76.25em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:34.4rem}}.md-search__form{background-color:var(--md-default-bg-color);box-shadow:0 0 .6rem transparent;height:2.4rem;position:relative;transition:color .25s,background-color .25s;z-index:2}@media screen and (min-width:60em){.md-search__form{background-color:rgba(0,0,0,.26);border-radius:.1rem;height:1.8rem}.md-search__form:hover{background-color:hsla(0,0%,100%,.12)}}[data-md-toggle=search]:checked~.md-header .md-search__form{background-color:var(--md-default-bg-color);border-radius:.1rem .1rem 0 0;box-shadow:0 0 .6rem rgba(0,0,0,.07);color:var(--md-default-fg-color)}.md-search__input{background:transparent;font-size:.9rem;height:100%;padding:0 2.2rem 0 3.6rem;position:relative;text-overflow:ellipsis;width:100%;z-index:2}[dir=rtl] .md-search__input{padding:0 3.6rem 0 2.2rem}.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s;transition:color .25s}.md-search__input::-moz-placeholder{-moz-transition:color .25s;transition:color .25s}.md-search__input::-ms-input-placeholder{-ms-transition:color .25s;transition:color .25s}.md-search__input::placeholder{transition:color 
.25s}.md-search__input::-webkit-input-placeholder{color:var(--md-default-fg-color--light)}.md-search__input::-moz-placeholder{color:var(--md-default-fg-color--light)}.md-search__input::-ms-input-placeholder{color:var(--md-default-fg-color--light)}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:var(--md-default-fg-color--light)}.md-search__input::-ms-clear{display:none}@media screen and (max-width:59.9375em){.md-search__input{font-size:.9rem;height:2.4rem;width:100%}}@media screen and (min-width:60em){.md-search__input{color:inherit;font-size:.8rem;padding-left:2.2rem}[dir=rtl] .md-search__input{padding-right:2.2rem}.md-search__input::-webkit-input-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::-moz-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::-ms-input-placeholder{color:var(--md-primary-bg-color--light)}.md-search__input::placeholder{color:var(--md-primary-bg-color--light)}.md-search__input+.md-search__icon{color:var(--md-primary-bg-color)}[data-md-toggle=search]:checked~.md-header .md-search__input{text-overflow:clip}[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input::-moz-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input::-ms-input-placeholder{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:var(--md-default-fg-color--light)}}.md-search__icon{cursor:pointer;display:inline-block;height:1.2rem;transition:color .25s,opacity .25s;width:1.2rem}.md-search__icon:hover{opacity:.7}.md-search__icon[for=__search]{left:.5rem;position:absolute;top:.3rem;z-index:2}[dir=rtl] .md-search__icon[for=__search]{left:auto;right:.5rem}[dir=rtl] 
.md-search__icon[for=__search] svg{transform:scaleX(-1)}@media screen and (max-width:59.9375em){.md-search__icon[for=__search]{left:.8rem;top:.6rem}[dir=rtl] .md-search__icon[for=__search]{left:auto;right:.8rem}.md-search__icon[for=__search] svg:first-child{display:none}}@media screen and (min-width:60em){.md-search__icon[for=__search]{pointer-events:none}.md-search__icon[for=__search] svg:last-child{display:none}}.md-search__options{pointer-events:none;position:absolute;right:.5rem;top:.3rem;z-index:2}[dir=rtl] .md-search__options{left:.5rem;right:auto}@media screen and (max-width:59.9375em){.md-search__options{right:.8rem;top:.6rem}[dir=rtl] .md-search__options{left:.8rem;right:auto}}.md-search__options>*{color:var(--md-default-fg-color--light);margin-left:.2rem;opacity:0;transform:scale(.75);transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s}.md-search__options>:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__options>*{opacity:1;pointer-events:auto;transform:scale(1)}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__options>:hover{opacity:.7}.md-search__suggest{align-items:center;color:var(--md-default-fg-color--lighter);display:flex;font-size:.9rem;height:100%;opacity:0;padding:0 2.2rem 0 3.6rem;position:absolute;top:0;transition:opacity 50ms;white-space:nowrap;width:100%}[dir=rtl] .md-search__suggest{padding:0 3.6rem 0 2.2rem}@media screen and (min-width:60em){.md-search__suggest{font-size:.8rem;padding-left:2.2rem}[dir=rtl] .md-search__suggest{padding-right:2.2rem}}[data-md-toggle=search]:checked~.md-header .md-search__suggest{opacity:1;transition:opacity .3s .1s}.md-search__output{border-radius:0 0 .1rem .1rem;overflow:hidden;position:absolute;width:100%;z-index:1}@media screen and (max-width:59.9375em){.md-search__output{bottom:0;top:2.4rem}}@media screen and 
(min-width:60em){.md-search__output{opacity:0;top:1.9rem;transition:opacity .4s}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}}.md-search__scrollwrap{-webkit-backface-visibility:hidden;backface-visibility:hidden;background-color:var(--md-default-bg-color);height:100%;overflow-y:auto;touch-action:pan-y}@media (-webkit-max-device-pixel-ratio:1),(max-resolution:1dppx){.md-search__scrollwrap{transform:translateZ(0)}}@media screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap{width:23.4rem}}@media screen and (min-width:76.25em){.md-search__scrollwrap{width:34.4rem}}@media screen and (min-width:60em){.md-search__scrollwrap{max-height:0;scrollbar-color:var(--md-default-fg-color--lighter) transparent;scrollbar-width:thin}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-search__scrollwrap::-webkit-scrollbar{height:.2rem;width:.2rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}}.md-search-result{color:var(--md-default-fg-color);word-break:break-word}.md-search-result__meta{background-color:var(--md-default-fg-color--lightest);color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.8rem;padding:0 .8rem;scroll-snap-align:start}@media screen and (min-width:60em){.md-search-result__meta{padding-left:2.2rem}[dir=rtl] .md-search-result__meta{padding-left:0;padding-right:2.2rem}}.md-search-result__list{list-style:none;margin:0;padding:0}.md-search-result__item{box-shadow:0 -.05rem 0 
var(--md-default-fg-color--lightest)}.md-search-result__item:first-child{box-shadow:none}.md-search-result__link{display:block;outline:none;scroll-snap-align:start;transition:background-color .25s}.md-search-result__link:focus,.md-search-result__link:hover{background-color:var(--md-accent-fg-color--transparent)}.md-search-result__link:last-child p:last-child{margin-bottom:.6rem}.md-search-result__more summary{color:var(--md-typeset-a-color);cursor:pointer;display:block;font-size:.64rem;outline:none;padding:.75em .8rem;scroll-snap-align:start;transition:color .25s,background-color .25s}@media screen and (min-width:60em){.md-search-result__more summary{padding-left:2.2rem}[dir=rtl] .md-search-result__more summary{padding-left:.8rem;padding-right:2.2rem}}.md-search-result__more summary:focus,.md-search-result__more summary:hover{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-search-result__more summary::-webkit-details-marker,.md-search-result__more summary::marker{display:none}.md-search-result__more summary~*>*{opacity:.65}.md-search-result__article{overflow:hidden;padding:0 .8rem;position:relative}@media screen and (min-width:60em){.md-search-result__article{padding-left:2.2rem}[dir=rtl] .md-search-result__article{padding-left:.8rem;padding-right:2.2rem}}.md-search-result__article--document .md-search-result__title{font-size:.8rem;font-weight:400;line-height:1.4;margin:.55rem 0}.md-search-result__icon{color:var(--md-default-fg-color--light);height:1.2rem;left:0;margin:.5rem;position:absolute;width:1.2rem}@media screen and (max-width:59.9375em){.md-search-result__icon{display:none}}.md-search-result__icon:after{background-color:currentColor;content:"";display:inline-block;height:100%;-webkit-mask-image:var(--md-search-result-icon);mask-image:var(--md-search-result-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}[dir=rtl] 
.md-search-result__icon{left:auto;right:0}[dir=rtl] .md-search-result__icon:after{transform:scaleX(-1)}.md-search-result__title{font-size:.64rem;font-weight:700;line-height:1.6;margin:.5em 0}.md-search-result__teaser{-webkit-box-orient:vertical;-webkit-line-clamp:2;color:var(--md-default-fg-color--light);display:-webkit-box;font-size:.64rem;line-height:1.6;margin:.5em 0;max-height:2rem;overflow:hidden;text-overflow:ellipsis}@media screen and (max-width:44.9375em){.md-search-result__teaser{-webkit-line-clamp:3;max-height:3rem}}@media screen and (min-width:60em) and (max-width:76.1875em){.md-search-result__teaser{-webkit-line-clamp:3;max-height:3rem}}.md-search-result__teaser mark{background-color:transparent;text-decoration:underline}.md-search-result__terms{font-size:.64rem;font-style:italic;margin:.5em 0}.md-search-result mark{background-color:transparent;color:var(--md-accent-fg-color)}.md-select{position:relative;z-index:1}.md-select__inner{background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 0 .05rem rgba(0,0,0,.25);color:var(--md-default-fg-color);left:50%;margin-top:.2rem;max-height:0;opacity:0;position:absolute;top:calc(100% - .2rem);transform:translate3d(-50%,.3rem,0);transition:transform .25s 375ms,opacity .25s .25s,max-height 0ms .5s}.md-select:focus-within .md-select__inner,.md-select:hover .md-select__inner{max-height:10rem;opacity:1;transform:translate3d(-50%,0,0);transition:transform .25s cubic-bezier(.1,.7,.1,1),opacity .25s,max-height 0ms}.md-select__inner:after{border-bottom:.2rem solid transparent;border-bottom-color:var(--md-default-bg-color);border-left:.2rem solid transparent;border-right:.2rem solid 
transparent;border-top:0;content:"";height:0;left:50%;margin-left:-.2rem;margin-top:-.2rem;position:absolute;top:0;width:0}.md-select__list{border-radius:.1rem;font-size:.8rem;list-style-type:none;margin:0;max-height:inherit;overflow:auto;padding:0}.md-select__item{line-height:1.8rem}.md-select__link{cursor:pointer;display:block;outline:none;padding-left:.6rem;padding-right:1.2rem;scroll-snap-align:start;transition:background-color .25s,color .25s;width:100%}[dir=rtl] .md-select__link{padding-left:1.2rem;padding-right:.6rem}.md-select__link:focus,.md-select__link:hover{color:var(--md-accent-fg-color)}.md-select__link:focus{background-color:var(--md-default-fg-color--lightest)}.md-sidebar{align-self:flex-start;flex-shrink:0;padding:1.2rem 0;position:-webkit-sticky;position:sticky;top:2.4rem;width:12.1rem}@media print{.md-sidebar{display:none}}@media screen and (max-width:76.1875em){.md-sidebar--primary{background-color:var(--md-default-bg-color);display:block;height:100%;left:-12.1rem;position:fixed;top:0;transform:translateX(0);transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s;width:12.1rem;z-index:4}[dir=rtl] .md-sidebar--primary{left:auto;right:-12.1rem}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);transform:translateX(12.1rem)}[dir=rtl] [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{transform:translateX(-12.1rem)}.md-sidebar--primary .md-sidebar__scrollwrap{bottom:0;left:0;margin:0;overflow:hidden;position:absolute;right:0;-ms-scroll-snap-type:none;scroll-snap-type:none;top:0}}@media screen and (min-width:76.25em){.md-sidebar{height:0}.no-js .md-sidebar{height:auto}}.md-sidebar--secondary{display:none;order:2}@media screen and (min-width:60em){.md-sidebar--secondary{height:0}.no-js .md-sidebar--secondary{height:auto}.md-sidebar--secondary:not([hidden]){display:block}.md-sidebar--secondary 
.md-sidebar__scrollwrap{touch-action:pan-y}}.md-sidebar__scrollwrap{-webkit-backface-visibility:hidden;backface-visibility:hidden;margin:0 .2rem;overflow-y:auto;scrollbar-color:var(--md-default-fg-color--lighter) transparent;scrollbar-width:thin}.md-sidebar__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) transparent}.md-sidebar__scrollwrap::-webkit-scrollbar{height:.2rem;width:.2rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}@media screen and (max-width:76.1875em){.md-overlay{background-color:rgba(0,0,0,.54);height:0;opacity:0;position:fixed;top:0;transition:width 0ms .25s,height 0ms .25s,opacity .25s;width:0;z-index:4}[data-md-toggle=drawer]:checked~.md-overlay{height:100%;opacity:1;transition:width 0ms,height 0ms,opacity .25s;width:100%}}@-webkit-keyframes facts{0%{height:0}to{height:.65rem}}@keyframes facts{0%{height:0}to{height:.65rem}}@-webkit-keyframes fact{0%{opacity:0;transform:translateY(100%)}50%{opacity:0}to{opacity:1;transform:translateY(0)}}@keyframes fact{0%{opacity:0;transform:translateY(100%)}50%{opacity:0}to{opacity:1;transform:translateY(0)}}:root{--md-source-forks-icon:url('data:image/svg+xml;charset=utf-8,');--md-source-repositories-icon:url('data:image/svg+xml;charset=utf-8,');--md-source-stars-icon:url('data:image/svg+xml;charset=utf-8,');--md-source-version-icon:url('data:image/svg+xml;charset=utf-8,')}.md-source{-webkit-backface-visibility:hidden;backface-visibility:hidden;display:block;font-size:.65rem;line-height:1.2;outline-color:var(--md-accent-fg-color);transition:opacity .25s;white-space:nowrap}.md-source:hover{opacity:.7}.md-source__icon{display:inline-block;height:2.4rem;vertical-align:middle;width:2rem}.md-source__icon svg{margin-left:.6rem;margin-top:.6rem}[dir=rtl] .md-source__icon 
svg{margin-left:0;margin-right:.6rem}.md-source__icon+.md-source__repository{margin-left:-2rem;padding-left:2rem}[dir=rtl] .md-source__icon+.md-source__repository{margin-left:0;margin-right:-2rem;padding-left:0;padding-right:2rem}.md-source__repository{display:inline-block;margin-left:.6rem;max-width:calc(100% - 1.2rem);overflow:hidden;text-overflow:ellipsis;vertical-align:middle}.md-source__facts{font-size:.55rem;list-style-type:none;margin:.1rem 0 0;opacity:.75;overflow:hidden;padding:0}[data-md-state=done] .md-source__facts{-webkit-animation:facts .25s ease-in;animation:facts .25s ease-in}.md-source__fact{display:inline-block}[data-md-state=done] .md-source__fact{-webkit-animation:fact .4s ease-out;animation:fact .4s ease-out}.md-source__fact:before{background-color:currentColor;content:"";display:inline-block;height:.6rem;margin-right:.1rem;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;vertical-align:text-top;width:.6rem}.md-source__fact:nth-child(1n+2):before{margin-left:.4rem}[dir=rtl] .md-source__fact{margin-left:.1rem;margin-right:0}[dir=rtl] .md-source__fact:nth-child(1n+2):before{margin-left:0;margin-right:.4rem}.md-source__fact--version:before{-webkit-mask-image:var(--md-source-version-icon);mask-image:var(--md-source-version-icon)}.md-source__fact--stars:before{-webkit-mask-image:var(--md-source-stars-icon);mask-image:var(--md-source-stars-icon)}.md-source__fact--forks:before{-webkit-mask-image:var(--md-source-forks-icon);mask-image:var(--md-source-forks-icon)}.md-source__fact--repositories:before{-webkit-mask-image:var(--md-source-repositories-icon);mask-image:var(--md-source-repositories-icon)}.md-tabs{background-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color);overflow:auto;width:100%}@media print{.md-tabs{display:none}}@media screen and 
(max-width:76.1875em){.md-tabs{display:none}}.md-tabs[data-md-state=hidden]{pointer-events:none}.md-tabs__list{contain:content;list-style:none;margin:0 0 0 .2rem;padding:0;white-space:nowrap}[dir=rtl] .md-tabs__list{margin-left:0;margin-right:.2rem}.md-tabs__item{display:inline-block;height:2.4rem;padding-left:.6rem;padding-right:.6rem}.md-tabs__link{-webkit-backface-visibility:hidden;backface-visibility:hidden;display:block;font-size:.7rem;margin-top:.8rem;opacity:.7;outline-color:var(--md-accent-fg-color);outline-offset:.2rem;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s}.md-tabs__link--active,.md-tabs__link:focus,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) .md-tabs__link{transition-delay:20ms}.md-tabs__item:nth-child(3) .md-tabs__link{transition-delay:40ms}.md-tabs__item:nth-child(4) .md-tabs__link{transition-delay:60ms}.md-tabs__item:nth-child(5) .md-tabs__link{transition-delay:80ms}.md-tabs__item:nth-child(6) .md-tabs__link{transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{transition-delay:.12s}.md-tabs__item:nth-child(8) .md-tabs__link{transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{transition-delay:.3s}.md-tabs[data-md-state=hidden] .md-tabs__link{opacity:0;transform:translateY(50%);transition:transform 0ms .1s,opacity 
.1s}.md-tags{margin-bottom:.75em}.md-tag{background:var(--md-default-fg-color--lightest);border-radius:.4rem;display:inline-block;font-size:.64rem;font-weight:700;line-height:1.6;margin-bottom:.5em;margin-right:.5em;padding:.3125em .9375em}.md-tag[href]{-webkit-tap-highlight-color:transparent;color:inherit;outline:none;transition:color 125ms,background-color 125ms}.md-tag[href]:focus,.md-tag[href]:hover{background-color:var(--md-accent-fg-color);color:var(--md-accent-bg-color)}[id]>.md-tag{vertical-align:text-top}@-webkit-keyframes md-annotation--pulse{0%{box-shadow:0 0 0 0 var(--md-default-fg-color--lightest)}75%{box-shadow:0 0 0 .625em transparent}to{box-shadow:0 0 0 0 transparent}}@keyframes md-annotation--pulse{0%{box-shadow:0 0 0 0 var(--md-default-fg-color--lightest)}75%{box-shadow:0 0 0 .625em transparent}to{box-shadow:0 0 0 0 transparent}}.md-tooltip{-webkit-backface-visibility:hidden;backface-visibility:hidden;background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 0 .05rem rgba(0,0,0,.25);color:var(--md-default-fg-color);max-height:0;opacity:0;overflow:auto;position:absolute;transform:translateY(.4rem);transition:transform .25s 375ms,opacity .25s,max-height 0ms .25s,z-index .25s;z-index:0}.md-tooltip__inner{font-size:.64rem;padding:.8rem}.md-tooltip__inner>:first-child{margin-top:0}.md-tooltip__inner>:last-child{margin-bottom:0}:focus-within>.md-tooltip,:focus>.md-tooltip{max-height:1000%;opacity:1;transform:translateY(0);transition:transform .25s cubic-bezier(.1,.7,.1,1),opacity .25s,max-height .25s 0ms,z-index 0ms}:focus-within>.md-tooltip--end,:focus>.md-tooltip--end{transform:translate(-100%)}:focus-within>.md-tooltip--center,:focus>.md-tooltip--center{transform:translate(-50%)}.focus-visible>.md-tooltip{outline:var(--md-accent-fg-color) 
auto}.md-tooltip--end{transform:translate(-100%,.4rem)}.md-tooltip--center{transform:translate(-50%,.4rem)}.md-annotation{outline:none;white-space:normal}.md-annotation:focus-within>*{z-index:2}.md-annotation:not([hidden]){display:inline-block}.md-annotation__index{-webkit-animation:md-annotation--pulse 2s infinite;animation:md-annotation--pulse 2s infinite;background-color:var(--md-default-fg-color--lighter);border-radius:1.25em;color:var(--md-accent-bg-color);cursor:pointer;display:inline-block;min-width:1.4em;padding:0 .375em;position:relative;text-align:center;transition:background-color .25s,z-index .25s;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;z-index:0}:focus-within>.md-annotation__index{-webkit-animation:none;animation:none;transition:background-color .25s,z-index 0ms}:focus-within>.md-annotation__index,:hover>.md-annotation__index{background-color:var(--md-accent-fg-color)}.md-annotation .md-tooltip{margin:-1.1764705882em .7352941176em 0;max-width:60%;min-width:16rem}.md-annotation .md-tooltip--center{margin-top:.7352941176em}.md-top{background:var(--md-primary-fg-color);border-radius:100%;bottom:.4rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 .025rem .05rem rgba(0,0,0,.1);color:var(--md-primary-bg-color);float:right;margin:-2.8rem .4rem .4rem;outline:none;padding:.4rem;position:-webkit-sticky;position:sticky;transform:translateY(0);transition:opacity 125ms,transform 125ms cubic-bezier(.4,0,.2,1),background-color 125ms;z-index:1}[dir=rtl] .md-top{float:left}.md-top[data-md-state=hidden]{opacity:0;pointer-events:none;transform:translateY(-.2rem)}.md-top:focus,.md-top:hover{background:var(--md-accent-fg-color);transform:scale(1.1)}@-webkit-keyframes hoverfix{0%{pointer-events:none}}@keyframes 
hoverfix{0%{pointer-events:none}}:root{--md-version-icon:url('data:image/svg+xml;charset=utf-8,')}.md-version{flex-shrink:0;font-size:.8rem;height:2.4rem}.md-version__current{color:inherit;cursor:pointer;margin-left:1.4rem;margin-right:.4rem;outline:none;position:relative;top:.05rem}[dir=rtl] .md-version__current{margin-left:.4rem;margin-right:1.4rem}.md-version__current:after{background-color:currentColor;content:"";display:inline-block;height:.6rem;margin-left:.4rem;-webkit-mask-image:var(--md-version-icon);mask-image:var(--md-version-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;width:.4rem}[dir=rtl] .md-version__current:after{margin-left:0;margin-right:.4rem}.md-version__list{background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.1),0 0 .05rem rgba(0,0,0,.25);color:var(--md-default-fg-color);list-style-type:none;margin:.2rem .8rem;max-height:0;opacity:0;overflow:auto;padding:0;position:absolute;-ms-scroll-snap-type:y mandatory;scroll-snap-type:y mandatory;top:.15rem;transition:max-height 0ms .5s,opacity .25s .25s;z-index:1}.md-version:focus-within .md-version__list,.md-version:hover .md-version__list{max-height:10rem;opacity:1;transition:max-height 0ms,opacity .25s}@media (pointer:coarse){.md-version:hover .md-version__list{-webkit-animation:hoverfix .25s forwards;animation:hoverfix .25s forwards}.md-version:focus-within .md-version__list{-webkit-animation:none;animation:none}}.md-version__item{line-height:1.8rem}.md-version__link{cursor:pointer;display:block;outline:none;padding-left:.6rem;padding-right:1.2rem;scroll-snap-align:start;transition:color .25s,background-color .25s;white-space:nowrap;width:100%}[dir=rtl] 
.md-version__link{padding-left:1.2rem;padding-right:.6rem}.md-version__link:focus,.md-version__link:hover{color:var(--md-accent-fg-color)}.md-version__link:focus{background-color:var(--md-default-fg-color--lightest)}:root{--md-admonition-icon--note:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--abstract:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--info:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--tip:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--success:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--question:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--warning:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--failure:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--danger:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--bug:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--example:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--quote:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .admonition,.md-typeset details{background-color:var(--md-admonition-bg-color);border-left:.2rem solid #448aff;border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 .025rem .05rem rgba(0,0,0,.05);color:var(--md-admonition-fg-color);display:flow-root;font-size:.64rem;margin:1.5625em 0;padding:0 .6rem;page-break-inside:avoid}@media print{.md-typeset .admonition,.md-typeset details{box-shadow:none}}[dir=rtl] .md-typeset .admonition,[dir=rtl] .md-typeset details{border-left:none;border-right:.2rem solid #448aff}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin-bottom:1em;margin-top:1em}.md-typeset .admonition .md-typeset__scrollwrap,.md-typeset details .md-typeset__scrollwrap{margin:1em -.6rem}.md-typeset .admonition .md-typeset__table,.md-typeset details .md-typeset__table{padding:0 .6rem}.md-typeset 
.admonition>.tabbed-set:only-child,.md-typeset details>.tabbed-set:only-child{margin-top:0}html .md-typeset .admonition>:last-child,html .md-typeset details>:last-child{margin-bottom:.6rem}.md-typeset .admonition-title,.md-typeset summary{background-color:rgba(68,138,255,.1);border-left:.2rem solid #448aff;border-top-left-radius:.1rem;font-weight:700;margin:0 -.6rem 0 -.8rem;padding:.4rem .6rem .4rem 2rem;position:relative}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{border-left:none;border-right:.2rem solid #448aff;margin:0 -.8rem 0 -.6rem;padding:.4rem 2rem .4rem .6rem}html .md-typeset .admonition-title:last-child,html .md-typeset summary:last-child{margin-bottom:0}.md-typeset .admonition-title:before,.md-typeset summary:before{background-color:#448aff;content:"";height:1rem;left:.6rem;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;width:1rem}[dir=rtl] .md-typeset .admonition-title:before,[dir=rtl] .md-typeset summary:before{left:auto;right:.6rem}.md-typeset .admonition-title+.tabbed-set:last-child,.md-typeset summary+.tabbed-set:last-child{margin-top:0}.md-typeset .admonition.note,.md-typeset details.note{border-color:#448aff}.md-typeset .note>.admonition-title,.md-typeset .note>summary{background-color:rgba(68,138,255,.1);border-color:#448aff}.md-typeset .note>.admonition-title:before,.md-typeset .note>summary:before{background-color:#448aff;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.abstract,.md-typeset .admonition.summary,.md-typeset .admonition.tldr,.md-typeset details.abstract,.md-typeset details.summary,.md-typeset details.tldr{border-color:#00b0ff}.md-typeset .abstract>.admonition-title,.md-typeset 
.abstract>summary,.md-typeset .summary>.admonition-title,.md-typeset .summary>summary,.md-typeset .tldr>.admonition-title,.md-typeset .tldr>summary{background-color:rgba(0,176,255,.1);border-color:#00b0ff}.md-typeset .abstract>.admonition-title:before,.md-typeset .abstract>summary:before,.md-typeset .summary>.admonition-title:before,.md-typeset .summary>summary:before,.md-typeset .tldr>.admonition-title:before,.md-typeset .tldr>summary:before{background-color:#00b0ff;-webkit-mask-image:var(--md-admonition-icon--abstract);mask-image:var(--md-admonition-icon--abstract);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.info,.md-typeset .admonition.todo,.md-typeset details.info,.md-typeset details.todo{border-color:#00b8d4}.md-typeset .info>.admonition-title,.md-typeset .info>summary,.md-typeset .todo>.admonition-title,.md-typeset .todo>summary{background-color:rgba(0,184,212,.1);border-color:#00b8d4}.md-typeset .info>.admonition-title:before,.md-typeset .info>summary:before,.md-typeset .todo>.admonition-title:before,.md-typeset .todo>summary:before{background-color:#00b8d4;-webkit-mask-image:var(--md-admonition-icon--info);mask-image:var(--md-admonition-icon--info);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.hint,.md-typeset .admonition.important,.md-typeset .admonition.tip,.md-typeset details.hint,.md-typeset details.important,.md-typeset details.tip{border-color:#00bfa5}.md-typeset .hint>.admonition-title,.md-typeset .hint>summary,.md-typeset .important>.admonition-title,.md-typeset .important>summary,.md-typeset .tip>.admonition-title,.md-typeset .tip>summary{background-color:rgba(0,191,165,.1);border-color:#00bfa5}.md-typeset .hint>.admonition-title:before,.md-typeset .hint>summary:before,.md-typeset .important>.admonition-title:before,.md-typeset .important>summary:before,.md-typeset 
.tip>.admonition-title:before,.md-typeset .tip>summary:before{background-color:#00bfa5;-webkit-mask-image:var(--md-admonition-icon--tip);mask-image:var(--md-admonition-icon--tip);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.check,.md-typeset .admonition.done,.md-typeset .admonition.success,.md-typeset details.check,.md-typeset details.done,.md-typeset details.success{border-color:#00c853}.md-typeset .check>.admonition-title,.md-typeset .check>summary,.md-typeset .done>.admonition-title,.md-typeset .done>summary,.md-typeset .success>.admonition-title,.md-typeset .success>summary{background-color:rgba(0,200,83,.1);border-color:#00c853}.md-typeset .check>.admonition-title:before,.md-typeset .check>summary:before,.md-typeset .done>.admonition-title:before,.md-typeset .done>summary:before,.md-typeset .success>.admonition-title:before,.md-typeset .success>summary:before{background-color:#00c853;-webkit-mask-image:var(--md-admonition-icon--success);mask-image:var(--md-admonition-icon--success);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.faq,.md-typeset .admonition.help,.md-typeset .admonition.question,.md-typeset details.faq,.md-typeset details.help,.md-typeset details.question{border-color:#64dd17}.md-typeset .faq>.admonition-title,.md-typeset .faq>summary,.md-typeset .help>.admonition-title,.md-typeset .help>summary,.md-typeset .question>.admonition-title,.md-typeset .question>summary{background-color:rgba(100,221,23,.1);border-color:#64dd17}.md-typeset .faq>.admonition-title:before,.md-typeset .faq>summary:before,.md-typeset .help>.admonition-title:before,.md-typeset .help>summary:before,.md-typeset .question>.admonition-title:before,.md-typeset 
.question>summary:before{background-color:#64dd17;-webkit-mask-image:var(--md-admonition-icon--question);mask-image:var(--md-admonition-icon--question);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.attention,.md-typeset .admonition.caution,.md-typeset .admonition.warning,.md-typeset details.attention,.md-typeset details.caution,.md-typeset details.warning{border-color:#ff9100}.md-typeset .attention>.admonition-title,.md-typeset .attention>summary,.md-typeset .caution>.admonition-title,.md-typeset .caution>summary,.md-typeset .warning>.admonition-title,.md-typeset .warning>summary{background-color:rgba(255,145,0,.1);border-color:#ff9100}.md-typeset .attention>.admonition-title:before,.md-typeset .attention>summary:before,.md-typeset .caution>.admonition-title:before,.md-typeset .caution>summary:before,.md-typeset .warning>.admonition-title:before,.md-typeset .warning>summary:before{background-color:#ff9100;-webkit-mask-image:var(--md-admonition-icon--warning);mask-image:var(--md-admonition-icon--warning);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.fail,.md-typeset .admonition.failure,.md-typeset .admonition.missing,.md-typeset details.fail,.md-typeset details.failure,.md-typeset details.missing{border-color:#ff5252}.md-typeset .fail>.admonition-title,.md-typeset .fail>summary,.md-typeset .failure>.admonition-title,.md-typeset .failure>summary,.md-typeset .missing>.admonition-title,.md-typeset .missing>summary{background-color:rgba(255,82,82,.1);border-color:#ff5252}.md-typeset .fail>.admonition-title:before,.md-typeset .fail>summary:before,.md-typeset .failure>.admonition-title:before,.md-typeset .failure>summary:before,.md-typeset .missing>.admonition-title:before,.md-typeset 
.missing>summary:before{background-color:#ff5252;-webkit-mask-image:var(--md-admonition-icon--failure);mask-image:var(--md-admonition-icon--failure);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.danger,.md-typeset .admonition.error,.md-typeset details.danger,.md-typeset details.error{border-color:#ff1744}.md-typeset .danger>.admonition-title,.md-typeset .danger>summary,.md-typeset .error>.admonition-title,.md-typeset .error>summary{background-color:rgba(255,23,68,.1);border-color:#ff1744}.md-typeset .danger>.admonition-title:before,.md-typeset .danger>summary:before,.md-typeset .error>.admonition-title:before,.md-typeset .error>summary:before{background-color:#ff1744;-webkit-mask-image:var(--md-admonition-icon--danger);mask-image:var(--md-admonition-icon--danger);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.bug,.md-typeset details.bug{border-color:#f50057}.md-typeset .bug>.admonition-title,.md-typeset .bug>summary{background-color:rgba(245,0,87,.1);border-color:#f50057}.md-typeset .bug>.admonition-title:before,.md-typeset .bug>summary:before{background-color:#f50057;-webkit-mask-image:var(--md-admonition-icon--bug);mask-image:var(--md-admonition-icon--bug);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.example,.md-typeset details.example{border-color:#7c4dff}.md-typeset .example>.admonition-title,.md-typeset .example>summary{background-color:rgba(124,77,255,.1);border-color:#7c4dff}.md-typeset .example>.admonition-title:before,.md-typeset .example>summary:before{background-color:#7c4dff;-webkit-mask-image:var(--md-admonition-icon--example);mask-image:var(--md-admonition-icon--example);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}.md-typeset .admonition.cite,.md-typeset 
.admonition.quote,.md-typeset details.cite,.md-typeset details.quote{border-color:#9e9e9e}.md-typeset .cite>.admonition-title,.md-typeset .cite>summary,.md-typeset .quote>.admonition-title,.md-typeset .quote>summary{background-color:hsla(0,0%,62%,.1);border-color:#9e9e9e}.md-typeset .cite>.admonition-title:before,.md-typeset .cite>summary:before,.md-typeset .quote>.admonition-title:before,.md-typeset .quote>summary:before{background-color:#9e9e9e;-webkit-mask-image:var(--md-admonition-icon--quote);mask-image:var(--md-admonition-icon--quote);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain}:root{--md-footnotes-icon:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .footnote{color:var(--md-default-fg-color--light);font-size:.64rem}.md-typeset .footnote>ol{margin-left:0}.md-typeset .footnote>ol>li{transition:color 125ms}.md-typeset .footnote>ol>li:target{color:var(--md-default-fg-color)}.md-typeset .footnote>ol>li:hover .footnote-backref,.md-typeset .footnote>ol>li:target .footnote-backref{opacity:1;transform:translateX(0)}.md-typeset .footnote>ol>li>:first-child{margin-top:0}.md-typeset .footnote-ref{font-size:.75em;font-weight:700}html .md-typeset .footnote-ref{outline-offset:.1rem}.md-typeset .footnote-backref{color:var(--md-typeset-a-color);display:inline-block;font-size:0;opacity:0;transform:translateX(.25rem);transition:color .25s,transform .25s .25s,opacity 125ms .25s;vertical-align:text-bottom}@media print{.md-typeset .footnote-backref{color:var(--md-typeset-a-color);opacity:1;transform:translateX(0)}}[dir=rtl] .md-typeset .footnote-backref{transform:translateX(-.25rem)}.md-typeset .footnote-backref:hover{color:var(--md-accent-fg-color)}.md-typeset 
.footnote-backref:before{background-color:currentColor;content:"";display:inline-block;height:.8rem;-webkit-mask-image:var(--md-footnotes-icon);mask-image:var(--md-footnotes-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:.8rem}[dir=rtl] .md-typeset .footnote-backref:before svg{transform:scaleX(-1)}.md-typeset [id^="fnref:"]:target{margin-top:-3.4rem;padding-top:3.4rem;scroll-margin-top:0}.md-typeset [id^="fnref:"]:target>.footnote-ref{outline:auto}.md-typeset [id^="fn:"]:target{margin-top:-3.45rem;padding-top:3.45rem;scroll-margin-top:0}.md-typeset .headerlink{color:var(--md-default-fg-color--lighter);display:inline-block;margin-left:.5rem;opacity:0;transition:color .25s,opacity 125ms}@media print{.md-typeset .headerlink{display:none}}[dir=rtl] .md-typeset .headerlink{margin-left:0;margin-right:.5rem}.md-typeset .headerlink:focus,.md-typeset :hover>.headerlink,.md-typeset :target>.headerlink{opacity:1;transition:color .25s,opacity 125ms}.md-typeset .headerlink:focus,.md-typeset .headerlink:hover,.md-typeset :target>.headerlink{color:var(--md-accent-fg-color)}.md-typeset :target{scroll-margin-top:3.6rem}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset :target{scroll-margin-top:6rem}}.md-typeset h1:target,.md-typeset h2:target,.md-typeset h3:target{scroll-margin-top:0}.md-typeset h1:target:before,.md-typeset h2:target:before,.md-typeset h3:target:before{content:"";display:block;margin-top:-3.4rem;padding-top:3.4rem}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset h1:target,.md-header--lifted~.md-container .md-typeset h2:target,.md-header--lifted~.md-container .md-typeset h3:target{scroll-margin-top:0}.md-header--lifted~.md-container .md-typeset h1:target:before,.md-header--lifted~.md-container .md-typeset h2:target:before,.md-header--lifted~.md-container .md-typeset h3:target:before{margin-top:-5.8rem;padding-top:5.8rem}}.md-typeset 
h4:target{scroll-margin-top:0}.md-typeset h4:target:before{content:"";display:block;margin-top:-3.45rem;padding-top:3.45rem}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset h4:target{scroll-margin-top:0}.md-header--lifted~.md-container .md-typeset h4:target:before{margin-top:-5.85rem;padding-top:5.85rem}}.md-typeset h5:target,.md-typeset h6:target{scroll-margin-top:0}.md-typeset h5:target:before,.md-typeset h6:target:before{content:"";display:block;margin-top:-3.6rem;padding-top:3.6rem}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset h5:target,.md-header--lifted~.md-container .md-typeset h6:target{scroll-margin-top:0}.md-header--lifted~.md-container .md-typeset h5:target:before,.md-header--lifted~.md-container .md-typeset h6:target:before{margin-top:-6rem;padding-top:6rem}}.md-typeset div.arithmatex{overflow:auto}@media screen and (max-width:44.9375em){.md-typeset div.arithmatex{margin:0 -.8rem}}.md-typeset div.arithmatex>*{margin:1em auto!important;padding:0 .8rem;touch-action:auto;width:-webkit-min-content;width:-moz-min-content;width:min-content}.md-typeset .critic.comment,.md-typeset del.critic,.md-typeset ins.critic{-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:var(--md-typeset-del-color)}.md-typeset ins.critic{background-color:var(--md-typeset-ins-color)}.md-typeset .critic.comment{color:var(--md-code-hl-comment-color)}.md-typeset .critic.comment:before{content:"/* "}.md-typeset .critic.comment:after{content:" */"}.md-typeset .critic.block{box-shadow:none;display:block;margin:1em 0;overflow:auto;padding-left:.8rem;padding-right:.8rem}.md-typeset .critic.block>:first-child{margin-top:.5em}.md-typeset .critic.block>:last-child{margin-bottom:.5em}:root{--md-details-icon:url('data:image/svg+xml;charset=utf-8,')}.md-typeset details{display:flow-root;overflow:visible;padding-top:0}.md-typeset 
details[open]>summary:after{transform:rotate(90deg)}.md-typeset details:not([open]){box-shadow:none;padding-bottom:0}.md-typeset details:not([open])>summary{border-radius:.1rem}.md-typeset summary{border-top-left-radius:.1rem;border-top-right-radius:.1rem;cursor:pointer;display:block;min-height:1rem;padding:.4rem 1.8rem .4rem 2rem}[dir=rtl] .md-typeset summary{padding:.4rem 2.2rem .4rem 1.8rem}.md-typeset summary:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}.md-typeset summary:after{background-color:currentColor;content:"";height:1rem;-webkit-mask-image:var(--md-details-icon);mask-image:var(--md-details-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;right:.4rem;top:.4rem;transform:rotate(0deg);transition:transform .25s;width:1rem}[dir=rtl] .md-typeset summary:after{left:.4rem;right:auto;transform:rotate(180deg)}.md-typeset summary::-webkit-details-marker,.md-typeset summary::marker{display:none}.md-typeset .emojione,.md-typeset .gemoji,.md-typeset .twemoji{display:inline-flex;height:1.125em;vertical-align:text-top}.md-typeset .emojione svg,.md-typeset .gemoji svg,.md-typeset .twemoji svg{fill:currentColor;max-height:100%;width:1.125em}.highlight .o,.highlight .ow{color:var(--md-code-hl-operator-color)}.highlight .p{color:var(--md-code-hl-punctuation-color)}.highlight .cpf,.highlight .l,.highlight .s,.highlight .s1,.highlight .s2,.highlight .sb,.highlight .sc,.highlight .si,.highlight .ss{color:var(--md-code-hl-string-color)}.highlight .cp,.highlight .se,.highlight .sh,.highlight .sr,.highlight .sx{color:var(--md-code-hl-special-color)}.highlight .il,.highlight .m,.highlight .mb,.highlight .mf,.highlight .mh,.highlight .mi,.highlight .mo{color:var(--md-code-hl-number-color)}.highlight .k,.highlight .kd,.highlight .kn,.highlight .kp,.highlight .kr,.highlight .kt{color:var(--md-code-hl-keyword-color)}.highlight .kc,.highlight 
.n{color:var(--md-code-hl-name-color)}.highlight .bp,.highlight .nb,.highlight .no{color:var(--md-code-hl-constant-color)}.highlight .nc,.highlight .ne,.highlight .nf,.highlight .nn{color:var(--md-code-hl-function-color)}.highlight .nd,.highlight .ni,.highlight .nl,.highlight .nt{color:var(--md-code-hl-keyword-color)}.highlight .c,.highlight .c1,.highlight .ch,.highlight .cm,.highlight .cs,.highlight .sd{color:var(--md-code-hl-comment-color)}.highlight .na,.highlight .nv,.highlight .vc,.highlight .vg,.highlight .vi{color:var(--md-code-hl-variable-color)}.highlight .ge,.highlight .gh,.highlight .go,.highlight .gp,.highlight .gr,.highlight .gs,.highlight .gt,.highlight .gu{color:var(--md-code-hl-generic-color)}.highlight .gd,.highlight .gi{border-radius:.1rem;margin:0 -.125em;padding:0 .125em}.highlight .gd{background-color:var(--md-typeset-del-color)}.highlight .gi{background-color:var(--md-typeset-ins-color)}.highlight .hll{background-color:var(--md-code-hl-color);display:block;margin:0 -1.1764705882em;padding:0 1.1764705882em}.highlight [data-linenos]:before{background-color:var(--md-code-bg-color);box-shadow:-.05rem 0 var(--md-default-fg-color--lightest) inset;color:var(--md-default-fg-color--light);content:attr(data-linenos);float:left;left:-1.1764705882em;margin-left:-1.1764705882em;margin-right:1.1764705882em;padding-left:1.1764705882em;position:-webkit-sticky;position:sticky;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;z-index:3}.highlighttable{display:flow-root}.highlighttable tbody,.highlighttable td{display:block;padding:0}.highlighttable tr{display:flex}.highlighttable pre{margin:0}.highlighttable .linenos{background-color:var(--md-code-bg-color);font-size:.85em;padding:.7720588235em 0 .7720588235em 1.1764705882em;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.highlighttable .linenodiv{box-shadow:-.05rem 0 var(--md-default-fg-color--lightest) 
inset;padding-right:.5882352941em}.highlighttable .linenodiv pre{color:var(--md-default-fg-color--light);text-align:right}.highlighttable .code{flex:1;min-width:0}.md-typeset .highlighttable{border-radius:.1rem;direction:ltr;margin:1em 0}.md-typeset .highlighttable code{border-radius:0}@media screen and (max-width:44.9375em){.md-typeset.md-content__inner>.highlight{margin:1em -.8rem}.md-typeset.md-content__inner>.highlight .hll{margin:0 -.8rem;padding:0 .8rem}.md-typeset.md-content__inner>.highlight code{border-radius:0}.md-typeset>.highlighttable{border-radius:0;margin:1em -.8rem}.md-typeset>.highlighttable .hll{margin:0 -.8rem;padding:0 .8rem}}.md-typeset .keys kbd:after,.md-typeset .keys kbd:before{-moz-osx-font-smoothing:initial;-webkit-font-smoothing:initial;color:inherit;margin:0;position:relative}.md-typeset .keys span{color:var(--md-default-fg-color--light);padding:0 .2em}.md-typeset .keys .key-alt:before{content:"⎇";padding-right:.4em}.md-typeset .keys .key-left-alt:before{content:"⎇";padding-right:.4em}.md-typeset .keys .key-right-alt:before{content:"⎇";padding-right:.4em}.md-typeset .keys .key-command:before{content:"⌘";padding-right:.4em}.md-typeset .keys .key-left-command:before{content:"⌘";padding-right:.4em}.md-typeset .keys .key-right-command:before{content:"⌘";padding-right:.4em}.md-typeset .keys .key-control:before{content:"⌃";padding-right:.4em}.md-typeset .keys .key-left-control:before{content:"⌃";padding-right:.4em}.md-typeset .keys .key-right-control:before{content:"⌃";padding-right:.4em}.md-typeset .keys .key-meta:before{content:"◆";padding-right:.4em}.md-typeset .keys .key-left-meta:before{content:"◆";padding-right:.4em}.md-typeset .keys .key-right-meta:before{content:"◆";padding-right:.4em}.md-typeset .keys .key-option:before{content:"⌥";padding-right:.4em}.md-typeset .keys .key-left-option:before{content:"⌥";padding-right:.4em}.md-typeset .keys .key-right-option:before{content:"⌥";padding-right:.4em}.md-typeset .keys 
.key-shift:before{content:"⇧";padding-right:.4em}.md-typeset .keys .key-left-shift:before{content:"⇧";padding-right:.4em}.md-typeset .keys .key-right-shift:before{content:"⇧";padding-right:.4em}.md-typeset .keys .key-super:before{content:"❖";padding-right:.4em}.md-typeset .keys .key-left-super:before{content:"❖";padding-right:.4em}.md-typeset .keys .key-right-super:before{content:"❖";padding-right:.4em}.md-typeset .keys .key-windows:before{content:"⊞";padding-right:.4em}.md-typeset .keys .key-left-windows:before{content:"⊞";padding-right:.4em}.md-typeset .keys .key-right-windows:before{content:"⊞";padding-right:.4em}.md-typeset .keys .key-arrow-down:before{content:"↓";padding-right:.4em}.md-typeset .keys .key-arrow-left:before{content:"←";padding-right:.4em}.md-typeset .keys .key-arrow-right:before{content:"→";padding-right:.4em}.md-typeset .keys .key-arrow-up:before{content:"↑";padding-right:.4em}.md-typeset .keys .key-backspace:before{content:"⌫";padding-right:.4em}.md-typeset .keys .key-backtab:before{content:"⇤";padding-right:.4em}.md-typeset .keys .key-caps-lock:before{content:"⇪";padding-right:.4em}.md-typeset .keys .key-clear:before{content:"⌧";padding-right:.4em}.md-typeset .keys .key-context-menu:before{content:"☰";padding-right:.4em}.md-typeset .keys .key-delete:before{content:"⌦";padding-right:.4em}.md-typeset .keys .key-eject:before{content:"⏏";padding-right:.4em}.md-typeset .keys .key-end:before{content:"⤓";padding-right:.4em}.md-typeset .keys .key-escape:before{content:"⎋";padding-right:.4em}.md-typeset .keys .key-home:before{content:"⤒";padding-right:.4em}.md-typeset .keys .key-insert:before{content:"⎀";padding-right:.4em}.md-typeset .keys .key-page-down:before{content:"⇟";padding-right:.4em}.md-typeset .keys .key-page-up:before{content:"⇞";padding-right:.4em}.md-typeset .keys .key-print-screen:before{content:"⎙";padding-right:.4em}.md-typeset .keys .key-tab:after{content:"⇥";padding-left:.4em}.md-typeset .keys 
.key-num-enter:after{content:"⌤";padding-left:.4em}.md-typeset .keys .key-enter:after{content:"⏎";padding-left:.4em}.md-typeset .tabbed-content{box-shadow:0 -.05rem var(--md-default-fg-color--lightest);display:none;order:99;width:100%}@media print{.md-typeset .tabbed-content{display:block;order:0}}.md-typeset .tabbed-content>.highlight:only-child pre,.md-typeset .tabbed-content>.highlighttable:only-child,.md-typeset .tabbed-content>pre:only-child{margin:0}.md-typeset .tabbed-content>.highlight:only-child pre>code,.md-typeset .tabbed-content>.highlighttable:only-child>code,.md-typeset .tabbed-content>pre:only-child>code{border-top-left-radius:0;border-top-right-radius:0}.md-typeset .tabbed-content>.tabbed-set{margin:0}.md-typeset .tabbed-set{border-radius:.1rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.md-typeset .tabbed-set>input{height:0;opacity:0;position:absolute;width:0}.md-typeset .tabbed-set>input:checked+label{border-color:var(--md-accent-fg-color);color:var(--md-accent-fg-color)}.md-typeset .tabbed-set>input:checked+label+.tabbed-content{display:block}.md-typeset .tabbed-set>input:focus+label{outline-color:var(--md-accent-fg-color);outline-style:auto}.md-typeset .tabbed-set>input:not(.focus-visible)+label{-webkit-tap-highlight-color:transparent;outline:none}.md-typeset .tabbed-set>label{border-bottom:.1rem solid transparent;color:var(--md-default-fg-color--light);cursor:pointer;font-size:.64rem;font-weight:700;padding:.9375em 1.25em .78125em;transition:color .25s;width:auto;z-index:1}.no-js .md-typeset .tabbed-set>label{transition:none}.md-typeset .tabbed-set>label:hover{color:var(--md-accent-fg-color)}:root{--md-tasklist-icon:url('data:image/svg+xml;charset=utf-8,');--md-tasklist-icon--checked:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .task-list-item{list-style-type:none;position:relative}.md-typeset .task-list-item [type=checkbox]{left:-2em;position:absolute;top:.45em}[dir=rtl] .md-typeset .task-list-item 
[type=checkbox]{left:auto;right:-2em}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}.md-typeset .task-list-indicator:before{background-color:var(--md-default-fg-color--lightest);content:"";height:1.25em;left:-1.5em;-webkit-mask-image:var(--md-tasklist-icon);mask-image:var(--md-tasklist-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;top:.15em;width:1.25em}[dir=rtl] .md-typeset .task-list-indicator:before{left:auto;right:-1.5em}.md-typeset [type=checkbox]:checked+.task-list-indicator:before{background-color:#00e676;-webkit-mask-image:var(--md-tasklist-icon--checked);mask-image:var(--md-tasklist-icon--checked)}:root>*{--md-mermaid-font-family:var(--md-text-font-family) sans-serif;--md-mermaid-edge-color:var(--md-default-fg-color);--md-mermaid-node-bg-color:var(--md-accent-fg-color--transparent);--md-mermaid-node-fg-color:var(--md-accent-fg-color);--md-mermaid-label-bg-color:var(--md-default-bg-color);--md-mermaid-label-fg-color:var(--md-default-fg-color)}@media screen and (min-width:45em){.md-typeset .inline{float:left;margin-bottom:.8rem;margin-right:.8rem;margin-top:0;width:11.7rem}[dir=rtl] .md-typeset .inline{float:right;margin-left:.8rem;margin-right:0}.md-typeset .inline.end{float:right;margin-left:.8rem;margin-right:0}[dir=rtl] .md-typeset .inline.end{float:left;margin-left:0;margin-right:.8rem}} \ No newline at end of file diff --git a/1.3/assets/stylesheets/palette.73e53a79.min.css b/1.3/assets/stylesheets/palette.73e53a79.min.css new file mode 100644 index 00000000..03a02114 --- /dev/null +++ b/1.3/assets/stylesheets/palette.73e53a79.min.css @@ -0,0 +1 @@ 
+[data-md-color-accent=red]{--md-accent-fg-color:#ff1947;--md-accent-fg-color--transparent:rgba(255,25,71,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=pink]{--md-accent-fg-color:#f50056;--md-accent-fg-color--transparent:rgba(245,0,86,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=purple]{--md-accent-fg-color:#df41fb;--md-accent-fg-color--transparent:rgba(223,65,251,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=deep-purple]{--md-accent-fg-color:#7c4dff;--md-accent-fg-color--transparent:rgba(124,77,255,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=indigo]{--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:rgba(82,108,254,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=blue]{--md-accent-fg-color:#4287ff;--md-accent-fg-color--transparent:rgba(66,135,255,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=light-blue]{--md-accent-fg-color:#0091eb;--md-accent-fg-color--transparent:rgba(0,145,235,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=cyan]{--md-accent-fg-color:#00bad6;--md-accent-fg-color--transparent:rgba(0,186,214,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=teal]{--md-accent-fg-color:#00bda4;--md-accent-fg-color--transparent:rgba(0,189,164,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=green]{--md-accent-fg-color:#00c753;--md-accent-fg-color--transparent:rgba(0,199,83,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=light-green]{--md-accent-fg-color:#63de17;--md-accent-fg-color--transparent:rgba(99,222,23,0.1);--md-accent-
bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-accent=lime]{--md-accent-fg-color:#b0eb00;--md-accent-fg-color--transparent:rgba(176,235,0,0.1);--md-accent-bg-color:rgba(0,0,0,0.87);--md-accent-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-accent=yellow]{--md-accent-fg-color:#ffd500;--md-accent-fg-color--transparent:rgba(255,213,0,0.1);--md-accent-bg-color:rgba(0,0,0,0.87);--md-accent-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-accent=amber]{--md-accent-fg-color:#fa0;--md-accent-fg-color--transparent:rgba(255,170,0,0.1);--md-accent-bg-color:rgba(0,0,0,0.87);--md-accent-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-accent=orange]{--md-accent-fg-color:#ff9100;--md-accent-fg-color--transparent:rgba(255,145,0,0.1);--md-accent-bg-color:rgba(0,0,0,0.87);--md-accent-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-accent=deep-orange]{--md-accent-fg-color:#ff6e42;--md-accent-fg-color--transparent:rgba(255,110,66,0.1);--md-accent-bg-color:#fff;--md-accent-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=red]{--md-primary-fg-color:#ef5552;--md-primary-fg-color--light:#e57171;--md-primary-fg-color--dark:#e53734;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=pink]{--md-primary-fg-color:#e92063;--md-primary-fg-color--light:#ec417a;--md-primary-fg-color--dark:#c3185d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=purple]{--md-primary-fg-color:#ab47bd;--md-primary-fg-color--light:#bb69c9;--md-primary-fg-color--dark:#8c24a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=deep-purple]{--md-primary-fg-color:#7e56c2;--md-primary-fg-color--light:#9574cd;--md-primary-fg-color--dark:#673ab6;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=indigo]{--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#
303fa1;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=blue]{--md-primary-fg-color:#2094f3;--md-primary-fg-color--light:#42a5f5;--md-primary-fg-color--dark:#1975d2;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=light-blue]{--md-primary-fg-color:#02a6f2;--md-primary-fg-color--light:#28b5f6;--md-primary-fg-color--dark:#0287cf;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=cyan]{--md-primary-fg-color:#00bdd6;--md-primary-fg-color--light:#25c5da;--md-primary-fg-color--dark:#0097a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=teal]{--md-primary-fg-color:#009485;--md-primary-fg-color--light:#26a699;--md-primary-fg-color--dark:#007a6c;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=green]{--md-primary-fg-color:#4cae4f;--md-primary-fg-color--light:#68bb6c;--md-primary-fg-color--dark:#398e3d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=light-green]{--md-primary-fg-color:#8bc34b;--md-primary-fg-color--light:#9ccc66;--md-primary-fg-color--dark:#689f38;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=lime]{--md-primary-fg-color:#cbdc38;--md-primary-fg-color--light:#d3e156;--md-primary-fg-color--dark:#b0b52c;--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-primary=yellow]{--md-primary-fg-color:#ffec3d;--md-primary-fg-color--light:#ffee57;--md-primary-fg-color--dark:#fbc02d;--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-primary=amber]{--md-primary-fg-color:#ffc105;--md-primary-fg-color--light:#ffc929;--md-primary-fg-color--dark:#ffa200;--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54)}[
data-md-color-primary=orange]{--md-primary-fg-color:#ffa724;--md-primary-fg-color--light:#ffa724;--md-primary-fg-color--dark:#fa8900;--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54)}[data-md-color-primary=deep-orange]{--md-primary-fg-color:#ff6e42;--md-primary-fg-color--light:#ff8a66;--md-primary-fg-color--dark:#f4511f;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=brown]{--md-primary-fg-color:#795649;--md-primary-fg-color--light:#8d6e62;--md-primary-fg-color--dark:#5d4037;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=grey]{--md-primary-fg-color:#757575;--md-primary-fg-color--light:#9e9e9e;--md-primary-fg-color--dark:#616161;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=blue-grey]{--md-primary-fg-color:#546d78;--md-primary-fg-color--light:#607c8a;--md-primary-fg-color--dark:#455a63;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7)}[data-md-color-primary=white]{--md-primary-fg-color:#fff;--md-primary-fg-color--light:hsla(0,0%,100%,0.7);--md-primary-fg-color--dark:rgba(0,0,0,0.07);--md-primary-bg-color:rgba(0,0,0,0.87);--md-primary-bg-color--light:rgba(0,0,0,0.54);--md-typeset-a-color:#4051b5}@media screen and (min-width:60em){[data-md-color-primary=white] .md-search__form{background-color:rgba(0,0,0,.07)}[data-md-color-primary=white] .md-search__form:hover{background-color:rgba(0,0,0,.32)}[data-md-color-primary=white] .md-search__input+.md-search__icon{color:rgba(0,0,0,.87)}}@media screen and (min-width:76.25em){[data-md-color-primary=white] .md-tabs{border-bottom:.05rem solid 
rgba(0,0,0,.07)}}[data-md-color-primary=black]{--md-primary-fg-color:#000;--md-primary-fg-color--light:rgba(0,0,0,0.54);--md-primary-fg-color--dark:#000;--md-primary-bg-color:#fff;--md-primary-bg-color--light:hsla(0,0%,100%,0.7);--md-typeset-a-color:#4051b5}[data-md-color-primary=black] .md-header{background-color:#000}@media screen and (max-width:59.9375em){[data-md-color-primary=black] .md-nav__source{background-color:rgba(0,0,0,.87)}}@media screen and (min-width:60em){[data-md-color-primary=black] .md-search__form{background-color:hsla(0,0%,100%,.12)}[data-md-color-primary=black] .md-search__form:hover{background-color:hsla(0,0%,100%,.3)}}@media screen and (max-width:76.1875em){html [data-md-color-primary=black] .md-nav--primary .md-nav__title[for=__drawer]{background-color:#000}}@media screen and (min-width:76.25em){[data-md-color-primary=black] .md-tabs{background-color:#000}}@media screen{[data-md-color-scheme=slate]{--md-hue:232;--md-default-fg-color:hsla(var(--md-hue),75%,95%,1);--md-default-fg-color--light:hsla(var(--md-hue),75%,90%,0.62);--md-default-fg-color--lighter:hsla(var(--md-hue),75%,90%,0.32);--md-default-fg-color--lightest:hsla(var(--md-hue),75%,90%,0.12);--md-default-bg-color:hsla(var(--md-hue),15%,21%,1);--md-default-bg-color--light:hsla(var(--md-hue),15%,21%,0.54);--md-default-bg-color--lighter:hsla(var(--md-hue),15%,21%,0.26);--md-default-bg-color--lightest:hsla(var(--md-hue),15%,21%,0.07);--md-code-fg-color:hsla(var(--md-hue),18%,86%,1);--md-code-bg-color:hsla(var(--md-hue),15%,15%,1);--md-code-hl-color:rgba(66,135,255,0.15);--md-code-hl-number-color:#e6695b;--md-code-hl-special-color:#f06090;--md-code-hl-function-color:#c973d9;--md-code-hl-constant-color:#9383e2;--md-code-hl-keyword-color:#6791e0;--md-code-hl-string-color:#2fb170;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-defa
ult-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color);--md-typeset-mark-color:rgba(66,135,255,0.3);--md-typeset-kbd-color:hsla(var(--md-hue),15%,94%,0.12);--md-typeset-kbd-accent-color:hsla(var(--md-hue),15%,94%,0.2);--md-typeset-kbd-border-color:hsla(var(--md-hue),15%,14%,1);--md-admonition-bg-color:hsla(var(--md-hue),0%,100%,0.025);--md-footer-bg-color:hsla(var(--md-hue),15%,12%,0.87);--md-footer-bg-color--dark:hsla(var(--md-hue),15%,10%,1)}[data-md-color-scheme=slate][data-md-color-primary=black],[data-md-color-scheme=slate][data-md-color-primary=white]{--md-typeset-a-color:#5d6cc0}} \ No newline at end of file diff --git a/1.3/change-log/index.html b/1.3/change-log/index.html new file mode 100644 index 00000000..3b92da09 --- /dev/null +++ b/1.3/change-log/index.html @@ -0,0 +1,2359 @@ + + + + + + + + + + + + + + + + + + + + + + Change Log - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Change Log

+

v1.3.0

+
    +
  • Refactor PHONE_LOCATIONS DORYAB provider. Fix bugs and faster execution up to 30x
  • +
  • New PHONE_KEYBOARD features
  • +
  • Add a new strategy to infer home location that can handle multiple homes for the same participant
  • +
  • Add module to exclude sleep episodes from steps intraday features
  • +
  • Fix PID matching when joining data from multiple participants. Now, we can handle PIDS with an arbitrary format.
  • +
  • Fix bug that did not correctly parse participants with more than 2 phones or more than 1 wearable
  • +
  • Fix crash when no phone data yield is needed to process location data (ALL & GPS location providers)
  • +
  • Remove location rows with the same timestamp based on their accuracy
  • +
  • Fix PHONE_CONVERSATION bug that produced inaccurate ratio features when time segments were not daily.
  • +
  • Other minor bug fixes
  • +
+

v1.2.0

+
    +
  • Sleep summary and intraday features are more consistent.
  • +
  • Add wake and bedtime features for sleep summary data.
  • +
  • Fix bugs with sleep PRICE features.
  • +
  • Update home page
  • +
  • Add contributing guide
  • +
+

v1.1.1

+
    +
  • Fix length of periodic segments on days with DLS
  • +
  • Fix crash when scraping data for an app that does not exist
  • +
  • Add tests for phone screen data
  • +
+

v1.1.0

+
    +
  • Add Fitbit calories intraday features
  • +
+

v1.0.1

+
    +
  • Fix crash in chunk_episodes of utils.py for multi time zone data
  • +
  • Fix crash in BT Doryab provider when the number of clusters is 2
  • +
  • Fix Fitbit multi time zone inference from phone data (simplify)
  • +
  • Fix missing columns when the input for phone data yield is empty
  • +
  • Fix wrong date time labels for event segments for multi time zone data (all labels are computed based on a single tz)
  • +
  • Fix periodic segment crash when there are no segments to assign (only affects wday, mday, qday, or yday)
  • +
  • Fix crash in Analysis Workflow with new suffix in segments’ labels
  • +
+

v1.0.0

+
    +
  • Add a new Overview page.
  • +
  • You can extend RAPIDS with your own data streams. Data streams are data collected with other sensing apps besides AWARE (like Beiwe, mindLAMP), and stored in other data containers (databases, files) besides MySQL.
  • +
  • Support to analyze Empatica wearable data (thanks to Joe Kim and Brinnae Bent from the DBDP)
  • +
  • Support to analyze AWARE data stored in CSV files and InfluxDB databases
  • +
  • Support to analyze data collected over multiple time zones
  • +
  • Support for sleep intraday features from the core team and also from the community (thanks to Stephen Price)
  • +
  • Users can comment on the documentation (powered by utterances).
  • +
  • SCR_SCRIPT and SRC_LANGUAGE are replaced by SRC_SCRIPT.
  • +
  • Add RAPIDS new logo
  • +
  • Move Citation and Minimal Example page to the Setup section
  • +
  • Add config.yaml validation schema and documentation. Now it’s more difficult to modify the config.yaml file with invalid values.
  • +
  • Add new time at home Doryab location feature
  • +
  • Add and home coordinates to the location data file so location providers can build features based on it.
  • +
  • If you are migrating from RAPIDS 0.4.3 or older, check this guide
  • +
+

v0.4.3

+
    +
  • Fix bug when any of the rows from any sensor do not belong a time segment
  • +
+

v0.4.2

+
    +
  • Update battery testing
  • +
  • Fix location processing bug when certain columns don’t exist
  • +
  • Fix HR intraday bug when minutesonZONE features were 0
  • +
  • Update FAQs
  • +
  • Fix HR summary bug when restinghr=0 (ignore those rows)
  • +
  • Fix ROG, location entropy and normalized entropy in Doryab location provider
  • +
  • Remove sampling frequency dependance in Doryab location provider
  • +
  • Update documentation of Doryab location provider
  • +
  • Add new FITBIT_DATA_YIELD RAPIDS provider
  • +
  • Deprecate Doryab circadian movement feature until it is fixed
  • +
+

v0.4.1

+
    +
  • Fix bug when no error message was displayed for an empty [PHONE_DATA_YIELD][SENSORS] when resampling location data
  • +
+

v0.4.0

+
    +
  • Add four new phone sensors that can be used for PHONE_DATA_YIELD
  • +
  • Add code so new feature providers can be added for the new four sensors
  • +
  • Add new clustering algorithm (OPTICS) for Doryab features
  • +
  • Update default EPS parameter for Doryab location clustering
  • +
  • Add clearer error message for invalid phone data yield sensors
  • +
  • Add ALL_RESAMPLED flag and accuracy limit for location features
  • +
  • Add FAQ about null characters in phone tables
  • +
  • Reactivate light and wifi tests and update testing docs
  • +
  • Fix bug when parsing Fitbit steps data
  • +
  • Fix bugs when merging features from empty time segments
  • +
  • Fix minor issues in the documentation
  • +
+

v0.3.2

+
    +
  • Update docker and linux instructions to use RSPM binary repo for for faster installation
  • +
  • Update CI to create a release on a tagged push that passes the tests
  • +
  • Clarify in DB credential configuration that we only support MySQL
  • +
  • Add Windows installation instructions
  • +
  • Fix bugs in the create_participants_file script
  • +
  • Fix bugs in Fitbit data parsing.
  • +
  • Fixed Doryab location features context of clustering.
  • +
  • Fixed the wrong shifting while calculating distance in Doryab location features.
  • +
  • Refactored the haversine function
  • +
+

v0.3.1

+
    +
  • Update installation docs for RAPIDS’ docker container
  • +
  • Fix example analysis use of accelerometer data in a plot
  • +
  • Update FAQ
  • +
  • Update minimal example documentation
  • +
  • Minor doc updates
  • +
+

v0.3.0

+
    +
  • Update R and Python virtual environments
  • +
  • Add GH actions CI support for tests and docker
  • +
  • Add release and test badges to README
  • +
+

v0.2.6

+
    +
  • Fix old versions banner on nested pages
  • +
+

v0.2.5

+
    +
  • Fix docs deploy typo
  • +
+

v0.2.4

+
    +
  • Fix broken links in landing page and docs deploy
  • +
+

v0.2.3

+
    +
  • Fix participant IDS in the example analysis workflow
  • +
+

v0.2.2

+
    +
  • Fix readme link to docs
  • +
+

v0.2.1

+
    +
  • FIx link to the most recent version in the old version banner
  • +
+

v0.2.0

+
    +
  • Add new PHONE_BLUETOOTH DORYAB provider
  • +
  • Deprecate PHONE_BLUETOOTH RAPIDS provider
  • +
  • Fix bug in filter_data_by_segment for Python when dataset was empty
  • +
  • Minor doc updates
  • +
  • New FAQ item
  • +
+

v0.1.0

+ + + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/citation/index.html b/1.3/citation/index.html new file mode 100644 index 00000000..de8a2916 --- /dev/null +++ b/1.3/citation/index.html @@ -0,0 +1,2071 @@ + + + + + + + + + + + + + + + + + + + + + + Citation - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ +
+
+ + +
+
+ + + + + + + + +

Cite RAPIDS and providers

+
+

RAPIDS and the community

+

RAPIDS is a community effort and as such we want to continue recognizing the contributions from other researchers. Besides citing RAPIDS, we ask you to cite any of the authors listed below if you used those sensor providers in your analysis, thank you!

+
+

RAPIDS

+

If you used RAPIDS, please cite this paper.

+
+

RAPIDS et al. citation

+

Vega J, Li M, Aguillera K, Goel N, Joshi E, Durica KC, Kunta AR, Low CA +RAPIDS: Reproducible Analysis Pipeline for Data Streams Collected with Mobile Devices +JMIR Preprints. 18/08/2020:23246 +DOI: 10.2196/preprints.23246 +URL: https://preprints.jmir.org/preprint/23246

+
+

DBDP (all Empatica sensors)

+

If you computed features using the provider [DBDP] of any of the Empatica sensors (accelerometer, heart rate, temperature, EDA, BVP, IBI, tags) cite this paper in addition to RAPIDS.

+
+

Bent et al. citation

+

Bent, B., Wang, K., Grzesiak, E., Jiang, C., Qi, Y., Jiang, Y., Cho, P., Zingler, K., Ogbeide, F.I., Zhao, A., Runge, R., Sim, I., Dunn, J. (2020). The Digital Biomarker Discovery Pipeline: An open source software platform for the development of digital biomarkers using mHealth and wearables data. Journal of Clinical and Translational Science, 1-28. doi:10.1017/cts.2020.511

+
+

Panda (accelerometer)

+

If you computed accelerometer features using the provider [PHONE_ACCELEROMETER][PANDA] cite this paper in addition to RAPIDS.

+
+

Panda et al. citation

+

Panda N, Solsky I, Huang EJ, Lipsitz S, Pradarelli JC, Delisle M, Cusack JC, Gadd MA, Lubitz CC, Mullen JT, Qadan M, Smith BL, Specht M, Stephen AE, Tanabe KK, Gawande AA, Onnela JP, Haynes AB. Using Smartphones to Capture Novel Recovery Metrics After Cancer Surgery. JAMA Surg. 2020 Feb 1;155(2):123-129. doi: 10.1001/jamasurg.2019.4702. PMID: 31657854; PMCID: PMC6820047.

+
+

Stachl (applications foreground)

+

If you computed applications foreground features using the app category (genre) catalogue in [PHONE_APPLICATIONS_FOREGROUND][RAPIDS] cite this paper in addition to RAPIDS.

+
+

Stachl et al. citation

+

Clemens Stachl, Quay Au, Ramona Schoedel, Samuel D. Gosling, Gabriella M. Harari, Daniel Buschek, Sarah Theres Völkel, Tobias Schuwerk, Michelle Oldemeier, Theresa Ullmann, Heinrich Hussmann, Bernd Bischl, Markus Bühner. Proceedings of the National Academy of Sciences Jul 2020, 117 (30) 17680-17687; DOI: 10.1073/pnas.1920484117

+
+

Doryab (bluetooth)

+

If you computed bluetooth features using the provider [PHONE_BLUETOOTH][DORYAB] cite this paper in addition to RAPIDS.

+
+

Doryab et al. citation

+

Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394

+
+

Barnett (locations)

+

If you computed locations features using the provider [PHONE_LOCATIONS][BARNETT] cite this paper and this paper in addition to RAPIDS.

+
+

Barnett et al. citation

+

Ian Barnett, Jukka-Pekka Onnela, Inferring mobility measures from GPS traces with missing data, Biostatistics, Volume 21, Issue 2, April 2020, Pages e98–e112, https://doi.org/10.1093/biostatistics/kxy059

+
+
+

Canzian et al. citation

+

Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp ‘15). Association for Computing Machinery, New York, NY, USA, 1293–1304. DOI:https://doi.org/10.1145/2750858.2805845

+
+

Doryab (locations)

+

If you computed locations features using the provider [PHONE_LOCATIONS][DORYAB] cite this paper and this paper in addition to RAPIDS. In addition, if you used the SUN_LI_VEGA_STRATEGY strategy, cite this paper as well.

+
+

Doryab et al. citation

+

Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394

+
+
+

Canzian et al. citation

+

Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp ‘15). Association for Computing Machinery, New York, NY, USA, 1293–1304. DOI:https://doi.org/10.1145/2750858.2805845

+
+
+

Sun et al. citation

+

Sun S, Folarin AA, Ranjan Y, Rashid Z, Conde P, Stewart C, Cummins N, Matcham F, Dalla Costa G, Simblett S, Leocani L, Lamers F, Sørensen PS, Buron M, Zabalza A, Guerrero Pérez AI, Penninx BW, Siddi S, Haro JM, Myin-Germeys I, Rintala A, Wykes T, Narayan VA, Comi G, Hotopf M, Dobson RJ, RADAR-CNS Consortium. Using Smartphones and Wearable Devices to Monitor Behavioral Changes During COVID-19. J Med Internet Res 2020;22(9):e19992

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/code_of_conduct/index.html b/1.3/code_of_conduct/index.html new file mode 100644 index 00000000..37183806 --- /dev/null +++ b/1.3/code_of_conduct/index.html @@ -0,0 +1,2174 @@ + + + + + + + + + + + + + + + + + + + + + + Code of Conduct - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Contributor Covenant Code of Conduct

+

Our Pledge

+

We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation.

+

We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community.

+

Our Standards

+

Examples of behavior that contributes to a positive environment for our +community include:

+
    +
  • Demonstrating empathy and kindness toward other people
  • +
  • Being respectful of differing opinions, viewpoints, and experiences
  • +
  • Giving and gracefully accepting constructive feedback
  • +
  • Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience
  • +
  • Focusing on what is best not just for us as individuals, but for the + overall community
  • +
+

Examples of unacceptable behavior include:

+
    +
  • The use of sexualized language or imagery, and sexual attention or + advances of any kind
  • +
  • Trolling, insulting or derogatory comments, and personal or political attacks
  • +
  • Public or private harassment
  • +
  • Publishing others’ private information, such as a physical or email + address, without their explicit permission
  • +
  • Other conduct which could reasonably be considered inappropriate in a + professional setting
  • +
+

Enforcement Responsibilities

+

Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful.

+

Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate.

+

Scope

+

This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event.

+

Enforcement

+

Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +moshi@pitt.edu. +All complaints will be reviewed and investigated promptly and fairly.

+

All community leaders are obligated to respect the privacy and security of the +reporter of any incident.

+

Enforcement Guidelines

+

Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct:

+

1. Correction

+

Community Impact: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community.

+

Consequence: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested.

+

2. Warning

+

Community Impact: A violation through a single incident or series +of actions.

+

Consequence: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban.

+

3. Temporary Ban

+

Community Impact: A serious violation of community standards, including +sustained inappropriate behavior.

+

Consequence: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban.

+

4. Permanent Ban

+

Community Impact: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals.

+

Consequence: A permanent ban from any sort of public interaction within +the community.

+

Attribution

+

This Code of Conduct is adapted from the Contributor Covenant, +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

+

Community Impact Guidelines were inspired by +Mozilla’s code of conduct enforcement ladder.

+

For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available +at https://www.contributor-covenant.org/translations.

+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/common-errors/index.html b/1.3/common-errors/index.html new file mode 100644 index 00000000..1423c3ab --- /dev/null +++ b/1.3/common-errors/index.html @@ -0,0 +1,2371 @@ + + + + + + + + + + + + + + + + + + + + + + Common Errors - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + +
+
+ + + + + + + + +

Common Errors

+

Cannot connect to your MySQL server

+
Problem
**Error in .local(drv, \...) :** **Failed to connect to database: Error:
+Can\'t initialize character set unknown (path: compiled\_in)** :
+
+Calls: dbConnect -> dbConnect -> .local -> .Call
+Execution halted
+[Tue Mar 10 19:40:15 2020]
+Error in rule download_dataset:
+    jobid: 531
+    output: data/raw/p60/locations_raw.csv
+
+RuleException:
+CalledProcessError in line 20 of /home/ubuntu/rapids/rules/preprocessing.snakefile:
+Command 'set -euo pipefail;  Rscript --vanilla /home/ubuntu/rapids/.snakemake/scripts/tmp_2jnvqs7.download_dataset.R' returned non-zero exit status 1.
+File "/home/ubuntu/rapids/rules/preprocessing.snakefile", line 20, in __rule_download_dataset
+File "/home/ubuntu/anaconda3/envs/moshi-env/lib/python3.7/concurrent/futures/thread.py", line 57, in run
+Shutting down, this might take some time.
+Exiting because a job execution failed. Look above for error message
+
+
+
Solution

Please make sure the DATABASE_GROUP in config.yaml matches your DB credentials group in .env.

+
+
+

Cannot start mysql in linux via brew services start mysql

+
Problem

Cannot start mysql in linux via brew services start mysql

+
+
Solution

Use mysql.server start

+
+
+

Every time I run force the download_dataset rule all rules are executed

+
Problem

When running snakemake -j1 -R pull_phone_data or ./rapids -j1 -R pull_phone_data all the rules and files are re-computed

+
+
Solution

This is expected behavior. The advantage of using snakemake under the hood is that every time a file containing data is modified every rule that depends on that file will be re-executed to update their results. In this case, since download_dataset updates all the raw data, and you are forcing the rule with the flag -R every single rule that depends on those raw files will be executed.

+
+
+

Error Table XXX doesn't exist while running the download_phone_data or download_fitbit_data rule.

+
Problem
Error in .local(conn, statement, ...) : 
+  could not run statement: Table 'db_name.table_name' doesn't exist
+Calls: colnames ... .local -> dbSendQuery -> dbSendQuery -> .local -> .Call
+Execution halted
+
+
+
Solution

Please make sure the sensors listed in [PHONE_VALID_SENSED_BINS][PHONE_SENSORS] and the [CONTAINER] of each sensor you activated in config.yaml match your database tables or files.

+
+
+

How do I install RAPIDS on Ubuntu 16.04

+
Solution
    +
  1. +

    Install dependencies (Homebrew - if not installed):

    +
      +
    • sudo apt-get install libmariadb-client-lgpl-dev libxml2-dev libssl-dev
    • +
    • Install brew for linux and add the following line to ~/.bashrc: export PATH=$HOME/.linuxbrew/bin:$PATH
    • +
    • source ~/.bashrc
    • +
    +
  2. +
  3. +

    Install MySQL

    +
      +
    • brew install mysql
    • +
    • brew services start mysql
    • +
    +
  4. +
  5. +

    Install R, pandoc and rmarkdown:

    +
      +
    • brew install r
    • +
    • brew install gcc@6 (needed due to this bug)
    • +
    • HOMEBREW_CC=gcc-6 brew install pandoc
    • +
    +
  6. +
  7. +

    Install miniconda using these instructions

    +
  8. +
  9. +

    Clone our repo:

    +
      +
    • git clone https://github.com/carissalow/rapids
    • +
    +
  10. +
  11. +

    Create a python virtual environment:

    +
      +
    • cd rapids
    • +
    • conda env create -f environment.yml -n MY_ENV_NAME
    • +
    • conda activate MY_ENV_NAME
    • +
    +
  12. +
  13. +

    Install R packages and virtual environment:

    +
      +
    • snakemake renv_install
    • +
    • snakemake renv_init
    • +
    • snakemake renv_restore
    • +
    +

    This step could take several minutes to complete. Please be patient and let it run until completion.

    +
  14. +
+
+
+

mysql.h cannot be found

+
Problem
--------------------------[ ERROR MESSAGE ]----------------------------
+<stdin>:1:10: fatal error: mysql.h: No such file or directory
+compilation terminated.
+-----------------------------------------------------------------------
+ERROR: configuration failed for package 'RMySQL'
+
+
+
Solution
sudo apt install libmariadbclient-dev
+
+
+
+

No package libcurl found

+
Problem

libcurl cannot be found

+
+
Solution

Install libcurl +

sudo apt install libcurl4-openssl-dev
+

+
+
+

Configuration failed because openssl was not found.

+
Problem

openssl cannot be found

+
+
Solution

Install openssl +

sudo apt install libssl-dev
+

+
+
+

Configuration failed because libxml-2.0 was not found

+
Problem

libxml-2.0 cannot be found

+
+
Solution

Install libxml-2.0 +

sudo apt install libxml2-dev
+

+
+
+

SSL connection error when running RAPIDS

+
Problem

You are getting the following error message when running RAPIDS: +

Error: Failed to connect: SSL connection error: error:1425F102:SSL routines:ssl_choose_client_version:unsupported protocol.
+

+
+
Solution

This is a bug in Ubuntu 20.04 when trying to connect to an old MySQL server with MySQL client 8.0. You should get the same error message if you try to connect from the command line. There you can add the option --ssl-mode=DISABLED but we can't do this from the R connector.

+

If you can't update your server, the quickest solution would be to import your database to another server or to a local environment. Alternatively, you could replace mysql-client and libmysqlclient-dev with mariadb-client and libmariadbclient-dev and reinstall renv. More info about this issue here

+
+
+

DB_TABLES key not found

+
Problem

If you get the following error KeyError in line 43 of preprocessing.smk: 'PHONE_SENSORS', it means that the indentation of the key [PHONE_SENSORS] is not matching the other child elements of PHONE_VALID_SENSED_BINS

+
+
Solution

You need to add or remove any leading whitespaces as needed on that line.

+
PHONE_VALID_SENSED_BINS:
+    COMPUTE: False # This flag is automatically ignored (set to True) if you are extracting PHONE_VALID_SENSED_DAYS or screen or Barnett's location features
+    BIN_SIZE: &bin_size 5 # (in minutes)
+    PHONE_SENSORS: []
+
+
+
+

Error while updating your conda environment in Ubuntu

+
Problem

You get the following error: +

CondaMultiError: CondaVerificationError: The package for tk located at /home/ubuntu/miniconda2/pkgs/tk-8.6.9-hed695b0_1003
+    appears to be corrupted. The path 'include/mysqlStubs.h'
+    specified in the package manifest cannot be found.
+ClobberError: This transaction has incompatible packages due to a shared path.
+    packages: conda-forge/linux-64::llvm-openmp-10.0.0-hc9558a2_0, anaconda/linux-64::intel-openmp-2019.4-243
+    path: 'lib/libiomp5.so'
+

+
+
Solution

Reinstall conda

+
+

Embedded nul in string

+
Problem

You get the following error when downloading sensor data: +

Error in result_fetch(res@ptr, n = n) : 
+  embedded nul in string:
+

+
+
Solution

This problem is due to the way RMariaDB handles a mismatch between data types in R and MySQL (see this issue). Since it seems this problem won’t be handled by RMariaDB, you have two options:

+
    +
  1. Remove the null character from the conflicting table cell(s). You can adapt the following query on a MySQL server 8.0 or older +
    update YOUR_TABLE set YOUR_COLUMN = regexp_replace(YOUR_COLUMN, '\0', '');
    +
  2. +
  3. If it’s not feasible to modify your data you can try swapping RMariaDB with RMySQL. Just have in mind you might have problems connecting to modern MySQL servers running in Linux:
      +
    • Add RMySQL to the renv environment by running the following command in a terminal open on RAPIDS root folder +
      R -e 'renv::install("RMySQL")'
      +
    • +
    • Go to src/data/streams/pull_phone_data.R or src/data/streams/pull_fitbit_data.R and replace library(RMariaDB) with library(RMySQL)
    • +
    • In the same file(s) replace dbEngine <- dbConnect(MariaDB(), default.file = "./.env", group = group) with dbEngine <- dbConnect(MySQL(), default.file = "./.env", group = group)
    • +
    +
  4. +
+
+

There is no package called RMariaDB

+
Problem

You get the following error when executing RAPIDS: +

Error in library(RMariaDB) : there is no package called 'RMariaDB'
+Execution halted
+

+
+
Solution

In RAPIDS v0.1.0 we replaced RMySQL R package with RMariaDB, this error means your R virtual environment is out of date, to update it run snakemake -j1 renv_restore

+
+

Unrecognized output timezone “America/New_York”

+
Problem

When running RAPIDS with R 4.0.3 on MacOS on M1, lubridate may throw an error associated with the timezone. +

Error in C_force_tz(time, tz = tzone, roll):
+   CCTZ: Unrecognized output timezone: "America/New_York"
+Calls: get_timestamp_filter ... .parse_date_time -> .strptime -> force_tz -> C_force_tz
+

+
+
Solution
+

This is because R timezone library is not set. Please add Sys.setenv(“TZDIR” = file.path(R.home(), “share”, “zoneinfo”)) to the file active.R in renv folder to set the timezone library. For further details on how to test if TZDIR is properly set, please refer to https://github.com/tidyverse/lubridate/issues/928#issuecomment-720059233.

+

Unimplemented MAX_NO_FIELD_TYPES

+
Problem

You get the following error when downloading Fitbit data: +

Error: Unimplemented MAX_NO_FIELD_TYPES
+Execution halted
+

+
+
Solution

At the moment RMariaDB cannot handle MySQL columns of JSON type. Change the type of your Fitbit data column to longtext (note that the content will not change and will still be a JSON object just interpreted as a string).

+
+

Running RAPIDS on Apple Silicon M1 Mac

+
Problem

You get the following error when installing pandoc or running rapids: +

MoSHI/rapids/renv/staging/1/00LOCK-KernSmooth/00new/KernSmooth/libs/KernSmooth.so: mach-0, but wrong architecture
+

+
+
Solution

As of Feb 2020 in M1 macs, R needs to be installed via brew under Rosetta (x86 arch) due to some incompatibility with selected R libraries. To do this, run your terminal via Rosetta, then proceed with the usual brew installation command. x86 homebrew should be installed in /usr/local/bin/brew, you can check which brew you are using by typing which brew. Then use x86 homebrew to install R and restore RAPIDS packages (renv_restore).

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/contributing/index.html b/1.3/contributing/index.html new file mode 100644 index 00000000..567c79a6 --- /dev/null +++ b/1.3/contributing/index.html @@ -0,0 +1,2073 @@ + + + + + + + + + + + + + + + + + + + + + + Contributing - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ +
+
+ + +
+
+ + + + + + + + +

Contributing

+

Thank you for taking the time to contribute!

+

All changes, small or big, are welcome, and regardless of who you are, we are always happy to work together to make your contribution as strong as possible. We follow the Covenant Code of Conduct, so we ask you to uphold it. Be kind to everyone in the community, and please report unacceptable behavior to moshiresearch@gmail.com.

+

Questions, Feature Requests, and Discussions

+

Post any questions, feature requests, or discussions in our GitHub Discussions tab.

+

Bug Reports

+

Report any bugs in our GitHub issue tracker keeping in mind to:

+
    +
  • Debug and simplify the problem to create a minimal example. For example, reduce the problem to a single participant, sensor, and a few rows of data.
  • +
  • Provide a clear and succinct description of the problem (expected behavior vs. actual behavior).
  • +
  • Attach your config.yaml, time segments file, and time zones file if appropriate.
  • +
  • Attach test data if possible and any screenshots or extra resources that will help us debug the problem.
  • +
  • Share the commit you are running: git rev-parse --short HEAD
  • +
  • Share your OS version (e.g., Windows 10)
  • +
  • Share the device/sensor you are processing (e.g., phone accelerometer)
  • +
+

Documentation Contributions

+

If you want to fix a typo or any other minor changes, you can edit the file online by clicking on the pencil icon at the top right of any page and opening a pull request using Github’s website

+

If your changes are more complex, clone RAPIDS’ repository, setup the dev environment for our documentation with this tutorial, and submit any changes on a new feature branch following our git flow.

+

Code Contributions

+
+

Hints for any code changes

+
    +
  • To submit any new code, use a new feature branch following our git flow.
  • +
  • If you need a new Python or R package in RAPIDS’ virtual environments, follow this tutorial
  • +
  • If you need to change the config.yaml you will need to update its validation schema with this tutorial
  • +
+
+

New Data Streams

+

New data containers. If you want to process data from a device RAPIDS supports (see this table) but it’s stored in a database engine or file type we don’t support yet, implement a new data stream container and format. You can copy and paste the format.yaml of one of the other streams of the device you are targeting.

+

New sensing apps. If you want to add support for new smartphone sensing apps like Beiwe, implement a new data stream container and format.

+

New wearable devices. If you want to add support for a new wearable, open a Github discussion, so we can add the necessary initial configuration files and code.

+

New Behavioral Features

+

If you want to add new behavioral features for mobile sensors RAPIDS already supports, follow this tutorial. A sensor is supported if it has a configuration section in config.yaml.

+

If you want to add new behavioral features for mobile sensors RAPIDS does not support yet, open a Github discussion, so we can add the necessary initial configuration files and code.

+

New Tests

+

If you want to add new tests for existent behavioral features, follow this tutorial.

+

New Visualizations

+

Open a Github discussion, so we can add the necessary initial configuration files and code.

+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/add-new-data-streams/index.html b/1.3/datastreams/add-new-data-streams/index.html new file mode 100644 index 00000000..9199d8ee --- /dev/null +++ b/1.3/datastreams/add-new-data-streams/index.html @@ -0,0 +1,2357 @@ + + + + + + + + + + + + + + + + + + + + + + Add New Data Streams - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Add New Data Streams

+

A data stream is a set of sensor data collected using a specific type of device with a specific format and stored in a specific container. RAPIDS is agnostic to data streams’ formats and container; see the Data Streams Introduction for a list of supported streams.

+

A container is queried with an R or Python script that connects to the database, API or file where your stream’s raw data is stored.

+

A format is described using a format.yaml file that specifies how to map and mutate your stream’s raw data to match the data and format RAPIDS needs.

+

The most common cases when you would want to implement a new data stream are:

+
    +
  • You collected data with a mobile sensing app RAPIDS does not support yet. For example, Beiwe data stored in MySQL. You will need to define a new format file and a new container script.
  • +
  • You collected data with a mobile sensing app RAPIDS supports, but this data is stored in a container that RAPIDS can’t connect to yet. For example, AWARE data stored in PostgreSQL. In this case, you can reuse the format file of the aware_mysql stream, but you will need to implement a new container script.
  • +
+
+

Hint

+

Both the container.[R|py] and the format.yaml are stored in ./src/data/streams/[stream_name] where [stream_name] can be aware_mysql for example.

+
+

Implement a Container

+

The container script of a data stream can be implemented in R (strongly recommended) or python. This script must have two functions if you are implementing a stream for phone data or one function otherwise. The script can contain other auxiliary functions.

+

First of all, add any parameters your script might need in config.yaml under (device)_DATA_STREAMS. These parameters will be available in the stream_parameters argument of the one or two functions you implement. For example, if you are adding support for Beiwe data stored in PostgreSQL and your container needs a set of credentials to connect to a database, your new data stream configuration would be:

+
PHONE_DATA_STREAMS:
+  USE: aware_python
+
+  # AVAILABLE:
+  aware_mysql: 
+    DATABASE_GROUP: MY_GROUP
+  beiwe_postgresql: 
+    DATABASE_GROUP: MY_GROUP # users define this group (user, password, host, etc.) in credentials.yaml
+
+

Then implement one or both of the following functions:

+
+

This function returns the data columns for a specific sensor and participant. It has the following parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
ParamDescription
stream_parametersAny parameters (keys/values) set by the user in any [DEVICE_DATA_STREAMS][stream_name] key of config.yaml. For example, [DATABASE_GROUP] inside [FITBIT_DATA_STREAMS][fitbitjson_mysql]
sensor_containerThe value set by the user in any [DEVICE_SENSOR][CONTAINER] key of config.yaml. It can be a table, file path, or whatever data source you want to support that contains the data from a single sensor for all participants. For example, [PHONE_ACCELEROMETER][CONTAINER]
deviceThe device id that you need to get the data for (this is set by the user in the participant files). For example, in AWARE this device id is a uuid
columnsA list of the columns that you need to get from sensor_container. You specify these columns in your stream’s format.yaml
+
+

Example

+

This is the pull_data function we implemented for aware_mysql. Note that we can message, warn or stop the user during execution.

+
pull_data <- function(stream_parameters, device, sensor_container, columns){
+    # get_db_engine is an auxiliary function not shown here for brevity but can be found in src/data/streams/aware_mysql/container.R
+    dbEngine <- get_db_engine(stream_parameters$DATABASE_GROUP)
+    query <- paste0("SELECT ", paste(columns, collapse = ",")," FROM ", sensor_container, " WHERE device_id = '", device,"'")
+    # Letting the user know what we are doing
+    message(paste0("Executing the following query to download data: ", query)) 
+    sensor_data <- dbGetQuery(dbEngine, query)
+
+    dbDisconnect(dbEngine)
+
+    if(nrow(sensor_data) == 0)
+        warning(paste("The device '", device,"' did not have data in ", sensor_container))
+
+    return(sensor_data)
+}
+
+
+
+
+
+

Warning

+

This function is only necessary for phone data streams.

+
+

RAPIDS allows users to use the keyword infer (previously multiple) to automatically infer the mobile Operating System a phone was running.

+

If you have a way to infer the OS of a device id, implement this function. For example, for AWARE data we use the aware_device table.

+

If you don’t have a way to infer the OS, call stop("Error Message") so other users know they can’t use infer or the inference failed, and they have to assign the OS manually in the participant file.

+

This function returns the operating system (android or ios) for a specific phone device id. It has the following parameters:

+ + + + + + + + + + + + + + + + + +
ParamDescription
stream_parametersAny parameters (keys/values) set by the user in any [DEVICE_DATA_STREAMS][stream_name] key of config.yaml. For example, [DATABASE_GROUP] inside [FITBIT_DATA_STREAMS][fitbitjson_mysql]
deviceThe device id that you need to infer the OS for (this is set by the user in the participant files). For example, in AWARE this device id is a uuid
+
+

Example

+

This is the infer_device_os function we implemented for aware_mysql. Note that we can message, warn or stop the user during execution.

+
infer_device_os <- function(stream_parameters, device){
+    # get_db_engine is an auxiliary function not shown here for brevity but can be found in src/data/streams/aware_mysql/container.R
+    group <- stream_parameters$DATABASE_GROUP
+
+    dbEngine <- dbConnect(MariaDB(), default.file = "./.env", group = group)
+    query <- paste0("SELECT device_id,brand FROM aware_device WHERE device_id = '", device, "'")
+    message(paste0("Executing the following query to infer phone OS: ", query)) 
+    os <- dbGetQuery(dbEngine, query)
+    dbDisconnect(dbEngine)
+
+    if(nrow(os) > 0)
+        return(os %>% mutate(os = ifelse(brand == "iPhone", "ios", "android")) %>% pull(os))
+    else
+        stop(paste("We cannot infer the OS of the following device id because it does not exist in the aware_device table:", device))
+
+    return(os)
+}
+
+
+
+
+

Implement a Format

+

A format file format.yaml describes the mapping between your stream’s raw data and the data that RAPIDS needs. This file has a section per sensor (e.g. PHONE_ACCELEROMETER), and each section has two attributes (keys):

+
    +
  1. +

    RAPIDS_COLUMN_MAPPINGS are mappings between the columns RAPIDS needs and the columns your raw data already has.

    +
      +
    1. The reserved keyword FLAG_TO_MUTATE flags columns that RAPIDS requires but that are not initially present in your container (database, CSV file). These columns have to be created by your mutation scripts.
    2. +
    +
  2. +
  3. +

    MUTATION. Sometimes your raw data needs to be transformed to match the format RAPIDS can handle (including creating columns marked as FLAG_TO_MUTATE)

    +
      +
    1. +

      COLUMN_MAPPINGS are mappings between the columns a mutation SCRIPT needs and the columns your raw data has.

      +
    2. +
    3. +

      SCRIPTS are a collection of R or Python scripts that transform one or more raw data columns into the format RAPIDS needs.

      +
    4. +
    +
  4. +
+
+

Hint

+

[RAPIDS_COLUMN_MAPPINGS] and [MUTATE][COLUMN_MAPPINGS] have a key (left-hand side string) and a value (right-hand side string). The values are the names used to pull columns from a container (e.g., columns in a database table). All values are renamed to their keys in lower case. The renamed columns are sent to every mutation script within the data argument, and the final output is the input that RAPIDS processes further.

+

For example, let’s assume we are implementing beiwe_mysql and defining the following format for PHONE_FAKESENSOR:

+
PHONE_FAKESENSOR:
+    ANDROID:
+        RAPIDS_COLUMN_MAPPINGS:
+            TIMESTAMP: beiwe_timestamp
+            DEVICE_ID: beiwe_deviceID
+            MAGNITUDE_SQUARED: FLAG_TO_MUTATE
+        MUTATE:
+            COLUMN_MAPPINGS:
+                MAGNITUDE: beiwe_value
+            SCRIPTS:
+              - src/data/streams/mutations/phone/square_magnitude.py
+
+

RAPIDS will:

+
    +
  1. Download beiwe_timestamp, beiwe_deviceID, and beiwe_value from the container of beiwe_mysql (MySQL DB)
  2. +
  3. Rename these columns to timestamp, device_id, and magnitude, respectively.
  4. +
  5. Execute square_magnitude.py with a data frame as an argument containing the renamed columns. This script will square magnitude and rename it to magnitude_squared
  6. +
  7. Verify the data frame returned by square_magnitude.py has the columns RAPIDS needs timestamp, device_id, and magnitude_squared.
  8. +
  9. Use this data frame as the input to be processed in the pipeline.
  10. +
+

Note that although RAPIDS_COLUMN_MAPPINGS and [MUTATE][COLUMN_MAPPINGS] keys are in capital letters for readability (e.g. MAGNITUDE_SQUARED), the names of the final columns you mutate in your scripts should be lower case.

+
+

Let’s explain in more depth this column mapping with examples.

+

Name mapping

+

The mapping for some sensors is straightforward. For example, accelerometer data most of the time has a timestamp, three axes (x,y,z), and a device id that produced it. AWARE and a different sensing app like Beiwe likely logged accelerometer data in the same way but with different column names. In this case, we only need to match Beiwe data columns to RAPIDS columns one-to-one:

+
PHONE_ACCELEROMETER:
+  ANDROID:
+    RAPIDS_COLUMN_MAPPINGS:
+      TIMESTAMP: beiwe_timestamp
+      DEVICE_ID: beiwe_deviceID
+      DOUBLE_VALUES_0: beiwe_x
+      DOUBLE_VALUES_1: beiwe_y
+      DOUBLE_VALUES_2: beiwe_z
+    MUTATE:
+      COLUMN_MAPPINGS:
+      SCRIPTS: # it's ok if this is empty
+
+

Value mapping

+

For some sensors, we need to map column names and values. For example, screen data has ON and OFF events; let’s suppose Beiwe represents an ON event with the number 1, but RAPIDS identifies ON events with the number 2. In this case, we need to mutate the raw data coming from Beiwe and replace all 1s with 2s.

+

We do this by listing one or more R or Python scripts in MUTATION_SCRIPTS that will be executed in order. We usually store all mutation scripts under src/data/streams/mutations/[device]/[platform]/ and they can be reused across data streams.

+
PHONE_SCREEN:
+  ANDROID:
+    RAPIDS_COLUMN_MAPPINGS:
+      TIMESTAMP: beiwe_timestamp
+      DEVICE_ID: beiwe_deviceID
+      EVENT: beiwe_event
+     MUTATE:
+      COLUMN_MAPPINGS:
+      SCRIPTS:
+        - src/data/streams/mutations/phone/beiwe/beiwe_screen_map.py
+
+
+

Hint

+
    +
  • A MUTATION_SCRIPT can also be used to clean/preprocess your data before extracting behavioral features.
  • +
  • A mutation script has to have a main function that receives two arguments, data and stream_parameters.
  • +
  • The stream_parameters argument contains the config.yaml key/values of your data stream (this is the same argument that your container.[py|R] script receives, see Implement a Container).
  • +
+
+

Example of a python mutation script +

import pandas as pd
+
+def main(data, stream_parameters):
+    # mutate data
+    return(data)
+

+
+
+

Example of a R mutation script +

source("renv/activate.R") # needed to use RAPIDS renv environment
+library(dplyr)
+
+main <- function(data, stream_parameters){
+    # mutate data
+    return(data)
+}
+

+
+
+
+

Complex mapping

+

Sometimes, your raw data doesn’t even have the same columns RAPIDS expects for a sensor. For example, let’s pretend Beiwe stores PHONE_ACCELEROMETER axis data in a single column called acc_col instead of three. You have to create a MUTATION_SCRIPT to split acc_col into three columns x, y, and z.

+

For this, you mark the three axes columns RAPIDS needs in [RAPIDS_COLUMN_MAPPINGS] with the word FLAG_TO_MUTATE, map acc_col in [MUTATION][COLUMN_MAPPINGS], and list a Python script under [MUTATION][SCRIPTS] with the code to split acc_col. See an example below.

+

RAPIDS expects that every column mapped as FLAG_TO_MUTATE will be generated by your mutation script, so it won’t try to retrieve them from your container (database, CSV file, etc.).

+

In our example, acc_col will be fetched from the stream’s container and renamed to JOINED_AXES because beiwe_split_acc.py will split it into double_values_0, double_values_1, and double_values_2.

+
PHONE_ACCELEROMETER:
+  ANDROID:
+    RAPIDS_COLUMN_MAPPINGS:
+      TIMESTAMP: beiwe_timestamp
+      DEVICE_ID: beiwe_deviceID
+      DOUBLE_VALUES_0: FLAG_TO_MUTATE
+      DOUBLE_VALUES_1: FLAG_TO_MUTATE
+      DOUBLE_VALUES_2: FLAG_TO_MUTATE
+    MUTATE:
+      COLUMN_MAPPINGS:
+        JOINED_AXES: acc_col
+      SCRIPTS:
+        - src/data/streams/mutations/phone/beiwe/beiwe_split_acc.py
+
+

This is a draft of beiwe_split_acc.py MUTATION_SCRIPT: +

import pandas as pd
+
+def main(data, stream_parameters):
+    # data has the acc_col
+    # split acc_col into three columns: double_values_0, double_values_1, double_values_2 to match RAPIDS format
+    # remove acc_col since we don't need it anymore
+    return(data)
+

+

OS complex mapping

+

There is a special case for a complex mapping scenario for smartphone data streams. The Android and iOS sensor APIs return data in different formats for certain sensors (like screen, activity recognition, battery, among others).

+

In case you didn’t notice, the examples we have used so far are grouped under an ANDROID key, which means they will be applied to data collected by Android phones. Additionally, each sensor has an IOS key for a similar purpose. We use the complex mapping described above to transform iOS data into an Android format (it’s always iOS to Android and any new phone data stream must do the same).

+

For example, this is the format.yaml key for PHONE_ACTIVITY_RECOGNITION. Note that the ANDROID mapping is simple (one-to-one) but the IOS mapping is complex with three FLAG_TO_MUTATE columns, two [MUTATE][COLUMN_MAPPINGS] mappings, and one [MUTATION][SCRIPT].

+
PHONE_ACTIVITY_RECOGNITION:
+  ANDROID:
+    RAPIDS_COLUMN_MAPPINGS:
+      TIMESTAMP: timestamp
+      DEVICE_ID: device_id
+      ACTIVITY_TYPE: activity_type
+      ACTIVITY_NAME: activity_name
+      CONFIDENCE: confidence
+    MUTATION:
+      COLUMN_MAPPINGS:
+      SCRIPTS:
+  IOS:
+    RAPIDS_COLUMN_MAPPINGS:
+      TIMESTAMP: timestamp
+      DEVICE_ID: device_id
+      ACTIVITY_TYPE: FLAG_TO_MUTATE
+      ACTIVITY_NAME: FLAG_TO_MUTATE
+      CONFIDENCE: FLAG_TO_MUTATE
+    MUTATION:
+      COLUMN_MAPPINGS:
+        ACTIVITIES: activities
+        CONFIDENCE: confidence
+      SCRIPTS:
+        - "src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R"
+
+
Example activity_recogniton_ios_unification.R

In this MUTATION_SCRIPT we create ACTIVITY_NAME and ACTIVITY_TYPE based on activities, and map confidence iOS values to Android values. +

source("renv/activate.R")
+library("dplyr", warn.conflicts = F)
+library(stringr)
+
+clean_ios_activity_column <- function(ios_gar){
+    ios_gar <- ios_gar %>%
+        mutate(activities = str_replace_all(activities, pattern = '("|\\[|\\])', replacement = ""))
+
+    existent_multiple_activities <- ios_gar %>%
+        filter(str_detect(activities, ",")) %>% 
+        group_by(activities) %>%
+        summarise(mutiple_activities = unique(activities), .groups = "drop_last") %>% 
+        pull(mutiple_activities)
+
+    known_multiple_activities <- c("stationary,automotive")
+    unkown_multiple_actvities <- setdiff(existent_multiple_activities, known_multiple_activities)
+    if(length(unkown_multiple_actvities) > 0){
+        stop(paste0("There are unkwown combinations of ios activities, you need to implement the decision of the ones to keep: ", unkown_multiple_actvities))
+    }
+
+    ios_gar <- ios_gar %>%
+        mutate(activities = str_replace_all(activities, pattern = "stationary,automotive", replacement = "automotive"))
+
+    return(ios_gar)
+}
+
+unify_ios_activity_recognition <- function(ios_gar){
+    # We only need to unify Google Activity Recognition data for iOS
+    # discard rows where activities column is blank
+    ios_gar <- ios_gar[-which(ios_gar$activities == ""), ]
+    # clean "activities" column of ios_gar
+    ios_gar <- clean_ios_activity_column(ios_gar)
+
+    # make it compatible with android version: generate "activity_name" and "activity_type" columns
+    ios_gar  <-  ios_gar %>% 
+        mutate(activity_name = case_when(activities == "automotive" ~ "in_vehicle",
+                                        activities == "cycling" ~ "on_bicycle",
+                                        activities == "walking" ~ "walking",
+                                        activities == "running" ~ "running",
+                                        activities == "stationary" ~ "still"),
+                activity_type = case_when(activities == "automotive" ~ 0,
+                                        activities == "cycling" ~ 1,
+                                        activities == "walking" ~ 7,
+                                        activities == "running" ~ 8,
+                                        activities == "stationary" ~ 3,
+                                        activities == "unknown" ~ 4),
+                confidence = case_when(confidence == 0 ~ 0,
+                                      confidence == 1 ~ 50,
+                                      confidence == 2 ~ 100)
+                                    ) %>% 
+        select(-activities)
+
+    return(ios_gar)
+}
+
+main <- function(data, stream_parameters){
+    return(unify_ios_activity_recognition(data, stream_parameters))
+}
+

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/aware-csv/index.html b/1.3/datastreams/aware-csv/index.html new file mode 100644 index 00000000..5c2ca9a0 --- /dev/null +++ b/1.3/datastreams/aware-csv/index.html @@ -0,0 +1,3123 @@ + + + + + + + + + + + + + + + + + + + + + + aware_csv - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

aware_csv

+

This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in CSV files.

+
+

Warning

+

The CSV files have to use , as separator, \ as escape character (do not escape " with ""), and wrap any string columns with ".

+

See examples in the CSV files inside rapids_example_csv.zip

+
Example of a valid CSV file
"_id","timestamp","device_id","activities","confidence","stationary","walking","running","automotive","cycling","unknown","label"
+1,1587528000000,"13dbc8a3-dae3-4834-823a-4bc96a7d459d","[\"stationary\"]",2,1,0,0,0,0,0,""
+2,1587528060000,"13dbc8a3-dae3-4834-823a-4bc96a7d459d","[\"stationary\"]",2,1,0,0,0,0,0,"supplement"
+3,1587528120000,"13dbc8a3-dae3-4834-823a-4bc96a7d459d","[\"stationary\"]",2,1,0,0,0,0,0,"supplement"
+4,1587528180000,"13dbc8a3-dae3-4834-823a-4bc96a7d459d","[\"stationary\"]",2,1,0,0,0,0,0,"supplement"
+5,1587528240000,"13dbc8a3-dae3-4834-823a-4bc96a7d459d","[\"stationary\"]",2,1,0,0,0,0,0,"supplement"
+6,1587528300000,"13dbc8a3-dae3-4834-823a-4bc96a7d459d","[\"stationary\"]",2,1,0,0,0,0,0,"supplement"
+7,1587528360000,"13dbc8a3-dae3-4834-823a-4bc96a7d459d","[\"stationary\"]",2,1,0,0,0,0,0,"supplement"
+
+
+
+

Container

+

A CSV file per sensor, each containing the data for all participants.

+

The script to connect and download data from this container is at: +

src/data/streams/aware_csv/container.R
+

+

Format

+

If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn’t need to modify this format (described below).

+

Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs.

+

The yaml file that describes the format of this data stream is at: +

src/data/streams/aware_csv/format.yaml
+

+

For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping to learn how this works.

+
+

Hint

+

The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default.

+
+
PHONE_ACCELEROMETER
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_VALUES_0double_values_0
DOUBLE_VALUES_1double_values_1
DOUBLE_VALUES_2double_values_2
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_ACTIVITY_RECOGNITION
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ACTIVITY_NAMEactivity_name
ACTIVITY_TYPEactivity_type
CONFIDENCEconfidence
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ACTIVITY_NAMEFLAG_TO_MUTATE
ACTIVITY_TYPEFLAG_TO_MUTATE
CONFIDENCEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + + + + + +
Script columnStream column
ACTIVITIESactivities
CONFIDENCEconfidence
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R
+
+
+

Note

+

For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE:

+
    +
  • if stream’s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0
  • +
  • if stream’s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1
  • +
  • if stream’s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7
  • +
  • if stream’s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8
  • +
  • if stream’s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3
  • +
  • if stream’s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4
  • +
+

For RAPIDS CONFIDENCE column:

+
    +
  • if stream’s confidence field is 0, set CONFIDENCE = 0
  • +
  • if stream’s confidence field is 1, set CONFIDENCE = 50
  • +
  • if stream’s confidence field is 2, set CONFIDENCE = 100
  • +
+
+
+
+
+
PHONE_APPLICATIONS_CRASHES
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
APPLICATION_VERSIONapplication_version
ERROR_SHORTerror_short
ERROR_LONGerror_long
ERROR_CONDITIONerror_condition
IS_SYSTEM_APPis_system_app
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_APPLICATIONS_FOREGROUND
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
IS_SYSTEM_APPis_system_app
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_APPLICATIONS_NOTIFICATIONS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
TEXTtext
SOUNDsound
VIBRATEvibrate
DEFAULTSdefaults
FLAGSflags
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_BATTERY
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BATTERY_STATUSbattery_status
BATTERY_LEVELbattery_level
BATTERY_SCALEbattery_scale
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BATTERY_STATUSFLAG_TO_MUTATE
BATTERY_LEVELbattery_level
BATTERY_SCALEbattery_scale
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
BATTERY_STATUSbattery_status
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/battery_ios_unification.R
+
+
+

Note

+

For RAPIDS BATTERY_STATUS column:

+
    +
  • if stream’s battery_status field is 3, set BATTERY_STATUS = 5 (full status)
  • +
  • if stream’s battery_status field is 1, set BATTERY_STATUS = 3 (discharge)
  • +
+
+
+
+

Same as ANDROID

+
+
+
+
PHONE_BLUETOOTH
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BT_ADDRESSbt_address
BT_NAMEbt_name
BT_RSSIbt_rssi
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Only old iOS versions supported this sensor (same mapping as Android).

+
+
+
+
PHONE_CALLS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
CALL_TYPEcall_type
CALL_DURATIONcall_duration
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
CALL_TYPEFLAG_TO_MUTATE
CALL_DURATIONcall_duration
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
CALL_TYPEcall_type
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/calls_ios_unification.R
+
+
+

Note

+

We transform iOS call logs into Android’s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android’s events: 1=incoming, 2=outgoing, 3=missed.

+

We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below):

+
    +
  • Search for the disconnected (4) status as it is common to all calls
  • +
  • Group all events that preceded every status 4
  • +
  • We convert every 1,2,4 (or 2,1,4) sequence to an incoming call
  • +
  • We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call
  • +
  • We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing)
  • +
  • We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up)
  • +
+

Tested with an Android (OnePlus 7T) and an iPhone XR

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Call typeAndroid (duration)iOS (duration)New Rule
Outgoing missed ended by me2 (0)3,4 (0,X)3,4 is converted to 2 with duration 0
Outgoing missed ended by them2(0)3,2,4 (0,X,X2)3,2,4 is converted to 2 with duration X2*
Incoming missed ended by meNA**1,4 (0,X)1,4 is converted to 3 with duration 0
Incoming missed ended by them3(0)1,4 (0,X)1,4 is converted to 3 with duration 0
Outgoing answered2(X excluding dialing time)3,2,4 (0,X,X2)3,2,4 is converted to 2 with duration X2
Incoming answered1(X excluding dialing time)1,2,4 (0,X,X2)1,2,4 is converted to 1 with duration X2
+

.* There is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered).

+

.** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant.

+
+
+
+
+
PHONE_CONVERSATION
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_ENERGYdouble_energy
INFERENCEinference
DOUBLE_CONVO_STARTdouble_convo_start
DOUBLE_CONVO_ENDdouble_convo_end
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_ENERGYdouble_energy
INFERENCEinference
DOUBLE_CONVO_STARTFLAG_TO_MUTATE
DOUBLE_CONVO_ENDFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + + + + + +
Script columnStream column
DOUBLE_CONVO_STARTdouble_convo_start
DOUBLE_CONVO_ENDdouble_convo_end
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R
+
+
+

Note

+

For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END:

+
    +
  • if stream’s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start.
  • +
  • if stream’s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end.
  • +
+
+
+
+
+
PHONE_KEYBOARD
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
BEFORE_TEXTbefore_text
CURRENT_TEXTcurrent_text
IS_PASSWORDis_password
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_LIGHT
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_LIGHT_LUXdouble_light_lux
ACCURACYaccuracy
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_LOCATIONS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_LATITUDEdouble_latitude
DOUBLE_LONGITUDEdouble_longitude
DOUBLE_BEARINGdouble_bearing
DOUBLE_SPEEDdouble_speed
DOUBLE_ALTITUDEdouble_altitude
PROVIDERprovider
ACCURACYaccuracy
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_LOG
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
LOG_MESSAGElog_message
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_MESSAGES
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
MESSAGE_TYPEmessage_type
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_SCREEN
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SCREEN_STATUSscreen_status
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SCREEN_STATUSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
SCREEN_STATUSscreen_status
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/screen_ios_unification.R
+
+
+

Note

+

For SCREEN_STATUS RAPIDS column:

+
    +
  • if stream’s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode).
  • +
+
+
+
+
+
PHONE_WIFI_CONNECTED
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
MAC_ADDRESSmac_address
SSIDssid
BSSIDbssid
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_WIFI_VISIBLE
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SSIDssid
BSSIDbssid
SECURITYsecurity
FREQUENCYfrequency
RSSIrssi
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Only old iOS versions supported this sensor (same mapping as Android).

+
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/aware-influxdb/index.html b/1.3/datastreams/aware-influxdb/index.html new file mode 100644 index 00000000..a663d398 --- /dev/null +++ b/1.3/datastreams/aware-influxdb/index.html @@ -0,0 +1,3112 @@ + + + + + + + + + + + + + + + + + + + + + + aware_influxdb (beta) - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

aware_influxdb (beta)

+
+

Warning

+

This data stream is being released in beta while we test it thoroughly.

+
+

This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in an InfluxDB database.

+

Container

+

An InfluxDB database with a table per sensor, each containing the data for all participants.

+

The script to connect and download data from this container is at: +

src/data/streams/aware_influxdb/container.R
+

+

Format

+

If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn’t need to modify this format (described below).

+

Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs.

+

The yaml file that describes the format of this data stream is at: +

src/data/streams/aware_csv/format.yaml
+

+

For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping to learn how this works.

+
+

Hint

+

The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default.

+
+
PHONE_ACCELEROMETER
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_VALUES_0double_values_0
DOUBLE_VALUES_1double_values_1
DOUBLE_VALUES_2double_values_2
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_ACTIVITY_RECOGNITION
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ACTIVITY_NAMEactivity_name
ACTIVITY_TYPEactivity_type
CONFIDENCEconfidence
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ACTIVITY_NAMEFLAG_TO_MUTATE
ACTIVITY_TYPEFLAG_TO_MUTATE
CONFIDENCEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + + + + + +
Script columnStream column
ACTIVITIESactivities
CONFIDENCEconfidence
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R
+
+
+

Note

+

For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE:

+
    +
  • if stream’s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0
  • +
  • if stream’s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1
  • +
  • if stream’s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7
  • +
  • if stream’s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8
  • +
  • if stream’s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3
  • +
  • if stream’s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4
  • +
+

For RAPIDS CONFIDENCE column:

+
    +
  • if stream’s confidence field is 0, set CONFIDENCE = 0
  • +
  • if stream’s confidence field is 1, set CONFIDENCE = 50
  • +
  • if stream’s confidence field is 2, set CONFIDENCE = 100
  • +
+
+
+
+
+
PHONE_APPLICATIONS_CRASHES
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
APPLICATION_VERSIONapplication_version
ERROR_SHORTerror_short
ERROR_LONGerror_long
ERROR_CONDITIONerror_condition
IS_SYSTEM_APPis_system_app
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_APPLICATIONS_FOREGROUND
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
IS_SYSTEM_APPis_system_app
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_APPLICATIONS_NOTIFICATIONS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
TEXTtext
SOUNDsound
VIBRATEvibrate
DEFAULTSdefaults
FLAGSflags
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_BATTERY
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BATTERY_STATUSbattery_status
BATTERY_LEVELbattery_level
BATTERY_SCALEbattery_scale
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BATTERY_STATUSFLAG_TO_MUTATE
BATTERY_LEVELbattery_level
BATTERY_SCALEbattery_scale
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
BATTERY_STATUSbattery_status
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/battery_ios_unification.R
+
+
+

Note

+

For RAPIDS BATTERY_STATUS column:

+
    +
  • if stream’s battery_status field is 3, set BATTERY_STATUS = 5 (full status)
  • +
  • if stream’s battery_status field is 1, set BATTERY_STATUS = 3 (discharge)
  • +
+
+
+
+

Same as ANDROID

+
+
+
+
PHONE_BLUETOOTH
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BT_ADDRESSbt_address
BT_NAMEbt_name
BT_RSSIbt_rssi
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Only old iOS versions supported this sensor (same mapping as Android).

+
+
+
+
PHONE_CALLS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
CALL_TYPEcall_type
CALL_DURATIONcall_duration
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
CALL_TYPEFLAG_TO_MUTATE
CALL_DURATIONcall_duration
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
CALL_TYPEcall_type
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/calls_ios_unification.R
+
+
+

Note

+

We transform iOS call logs into Android’s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android’s events: 1=incoming, 2=outgoing, 3=missed.

+

We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below):

+
    +
  • Search for the disconnected (4) status as it is common to all calls
  • +
  • Group all events that preceded every status 4
  • +
  • We convert every 1,2,4 (or 2,1,4) sequence to an incoming call
  • +
  • We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call
  • +
  • We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing)
  • +
  • We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up)
  • +
+

Tested with an Android (OnePlus 7T) and an iPhone XR

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Call typeAndroid (duration)iOS (duration)New Rule
Outgoing missed ended by me2 (0)3,4 (0,X)3,4 is converted to 2 with duration 0
Outgoing missed ended by them2(0)3,2,4 (0,X,X2)3,2,4 is converted to 2 with duration X2*
Incoming missed ended by meNA**1,4 (0,X)1,4 is converted to 3 with duration 0
Incoming missed ended by them3(0)1,4 (0,X)1,4 is converted to 3 with duration 0
Outgoing answered2(X excluding dialing time)3,2,4 (0,X,X2)3,2,4 is converted to 2 with duration X2
Incoming answered1(X excluding dialing time)1,2,4 (0,X,X2)1,2,4 is converted to 1 with duration X2
+

.* There is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered).

+

.** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant.

+
+
+
+
+
PHONE_CONVERSATION
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_ENERGYdouble_energy
INFERENCEinference
DOUBLE_CONVO_STARTdouble_convo_start
DOUBLE_CONVO_ENDdouble_convo_end
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_ENERGYdouble_energy
INFERENCEinference
DOUBLE_CONVO_STARTFLAG_TO_MUTATE
DOUBLE_CONVO_ENDFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + + + + + +
Script columnStream column
DOUBLE_CONVO_STARTdouble_convo_start
DOUBLE_CONVO_ENDdouble_convo_end
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R
+
+
+

Note

+

For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END:

+
    +
  • if stream’s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start.
  • +
  • if stream’s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end.
  • +
+
+
+
+
+
PHONE_KEYBOARD
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
BEFORE_TEXTbefore_text
CURRENT_TEXTcurrent_text
IS_PASSWORDis_password
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_LIGHT
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_LIGHT_LUXdouble_light_lux
ACCURACYaccuracy
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_LOCATIONS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_LATITUDEdouble_latitude
DOUBLE_LONGITUDEdouble_longitude
DOUBLE_BEARINGdouble_bearing
DOUBLE_SPEEDdouble_speed
DOUBLE_ALTITUDEdouble_altitude
PROVIDERprovider
ACCURACYaccuracy
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_LOG
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
LOG_MESSAGElog_message
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_MESSAGES
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
MESSAGE_TYPEmessage_type
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_SCREEN
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SCREEN_STATUSscreen_status
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SCREEN_STATUSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
SCREEN_STATUSscreen_status
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/screen_ios_unification.R
+
+
+

Note

+

For SCREEN_STATUS RAPIDS column:

+
    +
  • if stream’s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode).
  • +
+
+
+
+
+
PHONE_WIFI_CONNECTED
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
MAC_ADDRESSmac_address
SSIDssid
BSSIDbssid
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_WIFI_VISIBLE
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SSIDssid
BSSIDbssid
SECURITYsecurity
FREQUENCYfrequency
RSSIrssi
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Only old iOS versions supported this sensor (same mapping as Android).

+
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/aware-mysql/index.html b/1.3/datastreams/aware-mysql/index.html new file mode 100644 index 00000000..29e1ef6a --- /dev/null +++ b/1.3/datastreams/aware-mysql/index.html @@ -0,0 +1,3108 @@ + + + + + + + + + + + + + + + + + + + + + + aware_mysql - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

aware_mysql

+

This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in a MySQL database.

+

Container

+

A MySQL database with a table per sensor, each containing the data for all participants. This is the default database created by the old PHP AWARE server (as opposed to the new JavaScript Micro server).

+

The script to connect and download data from this container is at: +

src/data/streams/aware_mysql/container.R
+

+

Format

+

If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn’t need to modify this format (described below).

+

Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs.

+

The yaml file that describes the format of this data stream is at: +

src/data/streams/aware_mysql/format.yaml
+

+

For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping to learn how this works.

+
+

Hint

+

The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default.

+
+
PHONE_ACCELEROMETER
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_VALUES_0double_values_0
DOUBLE_VALUES_1double_values_1
DOUBLE_VALUES_2double_values_2
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_ACTIVITY_RECOGNITION
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ACTIVITY_NAMEactivity_name
ACTIVITY_TYPEactivity_type
CONFIDENCEconfidence
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ACTIVITY_NAMEFLAG_TO_MUTATE
ACTIVITY_TYPEFLAG_TO_MUTATE
CONFIDENCEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + + + + + +
Script columnStream column
ACTIVITIESactivities
CONFIDENCEconfidence
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R
+
+
+

Note

+

For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE:

+
    +
  • if stream’s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0
  • +
  • if stream’s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1
  • +
  • if stream’s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7
  • +
  • if stream’s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8
  • +
  • if stream’s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3
  • +
  • if stream’s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4
  • +
+

For RAPIDS CONFIDENCE column:

+
    +
  • if stream’s confidence field is 0, set CONFIDENCE = 0
  • +
  • if stream’s confidence field is 1, set CONFIDENCE = 50
  • +
  • if stream’s confidence field is 2, set CONFIDENCE = 100
  • +
+
+
+
+
+
PHONE_APPLICATIONS_CRASHES
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
APPLICATION_VERSIONapplication_version
ERROR_SHORTerror_short
ERROR_LONGerror_long
ERROR_CONDITIONerror_condition
IS_SYSTEM_APPis_system_app
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_APPLICATIONS_FOREGROUND
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
IS_SYSTEM_APPis_system_app
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_APPLICATIONS_NOTIFICATIONS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
TEXTtext
SOUNDsound
VIBRATEvibrate
DEFAULTSdefaults
FLAGSflags
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_BATTERY
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BATTERY_STATUSbattery_status
BATTERY_LEVELbattery_level
BATTERY_SCALEbattery_scale
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BATTERY_STATUSFLAG_TO_MUTATE
BATTERY_LEVELbattery_level
BATTERY_SCALEbattery_scale
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
BATTERY_STATUSbattery_status
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/battery_ios_unification.R
+
+
+

Note

+

For RAPIDS BATTERY_STATUS column:

+
    +
  • if stream’s battery_status field is 3, set BATTERY_STATUS = 5 (full status)
  • +
  • if stream’s battery_status field is 1, set BATTERY_STATUS = 3 (discharge)
  • +
+
+
+
+

Same as ANDROID

+
+
+
+
PHONE_BLUETOOTH
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BT_ADDRESSbt_address
BT_NAMEbt_name
BT_RSSIbt_rssi
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Only old iOS versions supported this sensor (same mapping as Android).

+
+
+
+
PHONE_CALLS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
CALL_TYPEcall_type
CALL_DURATIONcall_duration
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
CALL_TYPEFLAG_TO_MUTATE
CALL_DURATIONcall_duration
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
CALL_TYPEcall_type
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/calls_ios_unification.R
+
+
+

Note

+

We transform iOS call logs into Android’s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android’s events: 1=incoming, 2=outgoing, 3=missed.

+

We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below):

+
    +
  • Search for the disconnected (4) status as it is common to all calls
  • +
  • Group all events that preceded every status 4
  • +
  • We convert every 1,2,4 (or 2,1,4) sequence to an incoming call
  • +
  • We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call
  • +
  • We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing)
  • +
  • We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up)
  • +
+

Tested with an Android (OnePlus 7T) and an iPhone XR

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Call typeAndroid (duration)iOS (duration)New Rule
Outgoing missed ended by me2 (0)3,4 (0,X)3,4 is converted to 2 with duration 0
Outgoing missed ended by them2(0)3,2,4 (0,X,X2)3,2,4 is converted to 2 with duration X2*
Incoming missed ended by meNA**1,4 (0,X)1,4 is converted to 3 with duration 0
Incoming missed ended by them3(0)1,4 (0,X)1,4 is converted to 3 with duration 0
Outgoing answered2(X excluding dialing time)3,2,4 (0,X,X2)3,2,4 is converted to 2 with duration X2
Incoming answered1(X excluding dialing time)1,2,4 (0,X,X2)1,2,4 is converted to 1 with duration X2
+

.* There is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered).

+

.** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant.

+
+
+
+
+
PHONE_CONVERSATION
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_ENERGYdouble_energy
INFERENCEinference
DOUBLE_CONVO_STARTdouble_convo_start
DOUBLE_CONVO_ENDdouble_convo_end
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_ENERGYdouble_energy
INFERENCEinference
DOUBLE_CONVO_STARTFLAG_TO_MUTATE
DOUBLE_CONVO_ENDFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + + + + + +
Script columnStream column
DOUBLE_CONVO_STARTdouble_convo_start
DOUBLE_CONVO_ENDdouble_convo_end
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R
+
+
+

Note

+

For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END:

+
    +
  • if stream’s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start.
  • +
  • if stream’s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end.
  • +
+
+
+
+
+
PHONE_KEYBOARD
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
BEFORE_TEXTbefore_text
CURRENT_TEXTcurrent_text
IS_PASSWORDis_password
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_LIGHT
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_LIGHT_LUXdouble_light_lux
ACCURACYaccuracy
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_LOCATIONS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_LATITUDEdouble_latitude
DOUBLE_LONGITUDEdouble_longitude
DOUBLE_BEARINGdouble_bearing
DOUBLE_SPEEDdouble_speed
DOUBLE_ALTITUDEdouble_altitude
PROVIDERprovider
ACCURACYaccuracy
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_LOG
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
LOG_MESSAGElog_message
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_MESSAGES
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
MESSAGE_TYPEmessage_type
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_SCREEN
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SCREEN_STATUSscreen_status
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SCREEN_STATUSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
SCREEN_STATUSscreen_status
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/screen_ios_unification.R
+
+
+

Note

+

For SCREEN_STATUS RAPIDS column:

+
    +
  • if stream’s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode).
  • +
+
+
+
+
+
PHONE_WIFI_CONNECTED
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
MAC_ADDRESSmac_address
SSIDssid
BSSIDbssid
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_WIFI_VISIBLE
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SSIDssid
BSSIDbssid
SECURITYsecurity
FREQUENCYfrequency
RSSIrssi
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Only old iOS versions supported this sensor (same mapping as Android).

+
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/data-streams-introduction/index.html b/1.3/datastreams/data-streams-introduction/index.html new file mode 100644 index 00000000..55f639cd --- /dev/null +++ b/1.3/datastreams/data-streams-introduction/index.html @@ -0,0 +1,1960 @@ + + + + + + + + + + + + + + + + + + + + + + Introduction - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Data Streams Introduction

+

A data stream is a set of sensor data collected using a specific type of device with a specific format and stored in a specific container.

+

For example, the aware_mysql data stream handles smartphone data (device) collected with the AWARE Framework (format) stored in a MySQL database (container). Similarly, smartphone data collected with Beiwe will have a different format and could be stored in a container like a PostgreSQL database or a CSV file.

+

If you want to process a data stream using RAPIDS, make sure that your data is stored in a supported format and container (see table below).

+

If RAPIDS doesn’t support your data stream yet (e.g. Beiwe data stored in PostgreSQL, or AWARE data stored in SQLite), you can always implement a new data stream. If it’s something you think other people might be interested in, we will be happy to include your new data stream in RAPIDS, so get in touch!

+
+

Hint

+

Currently, you can add new data streams for smartphones, Fitbit, and Empatica devices. If you need RAPIDS to process data from other devices, like Oura Rings or Actigraph wearables, get in touch. It is a more complicated process that could take a couple of days to implement for someone familiar with R or Python, but we would be happy to work on it together.

+
+

For reference, these are the data streams we currently support:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Data StreamDeviceFormatContainerDocs
aware_mysqlPhoneAWARE appMySQLlink
aware_csvPhoneAWARE appCSV fileslink
aware_influxdb (beta)PhoneAWARE appInfluxDBlink
fitbitjson_mysqlFitbitJSON (per Fitbit’s API)MySQLlink
fitbitjson_csvFitbitJSON (per Fitbit’s API)CSV fileslink
fitbitparsed_mysqlFitbitParsed (parsed API data)MySQLlink
fitbitparsed_csvFitbitParsed (parsed API data)CSV fileslink
empatica_zipEmpaticaE4 ConnectZIP fileslink
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/empatica-zip/index.html b/1.3/datastreams/empatica-zip/index.html new file mode 100644 index 00000000..23ef997b --- /dev/null +++ b/1.3/datastreams/empatica-zip/index.html @@ -0,0 +1,2165 @@ + + + + + + + + + + + + + + + + + + + + + + empatica_zip - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

empatica_zip

+

This data stream handles Empatica sensor data downloaded as zip files using the E4 Connect.

+

Container

+

You need to create a subfolder for every participant named after their device id inside the folder specified by [EMPATICA_DATA_STREAMS][empatica_zipfiles][FOLDER]. You can add one or more Empatica zip files to any subfolder.

+

The script to connect and download data from this container is at: +

src/data/streams/empatica_zip/container.R
+

+

Format

+

The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Empatica sensors. This file is at:

+
src/data/streams/empatica_zip/format.yaml
+
+

All columns are mutated from the raw data in the zip files so you don’t need to modify any column mappings.

+
EMPATICA_ACCELEROMETER

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_VALUES_0double_values_0
DOUBLE_VALUES_1double_values_1
DOUBLE_VALUES_2double_values_2
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
EMPATICA_HEARTRATE

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
HEARTRATEheartrate
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
EMPATICA_TEMPERATURE

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
TEMPERATUREtemperature
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
EMPATICA_ELECTRODERMAL_ACTIVITY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ELECTRODERMAL_ACTIVITYelectrodermal_activity
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
EMPATICA_BLOOD_VOLUME_PULSE

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BLOOD_VOLUME_PULSEblood_volume_pulse
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
EMPATICA_INTER_BEAT_INTERVAL

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
INTER_BEAT_INTERVALinter_beat_interval
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
EMPATICA_TAGS

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
TAGStags
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/fitbitjson-csv/index.html b/1.3/datastreams/fitbitjson-csv/index.html new file mode 100644 index 00000000..84642ed3 --- /dev/null +++ b/1.3/datastreams/fitbitjson-csv/index.html @@ -0,0 +1,2515 @@ + + + + + + + + + + + + + + + + + + + + + + fitbitjson_csv - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

fitbitjson_csv

+

This data stream handles Fitbit sensor data downloaded using the Fitbit Web API and stored in a CSV file. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. Once you have your sensor data in a CSV file, RAPIDS can process it.

+
+

Warning

+

The CSV files have to use , as separator, \ as escape character (do not escape " with ""), and wrap any string columns with ".

+
Example of a valid CSV file
"timestamp","device_id","label","fitbit_id","fitbit_data_type","fitbit_data"
+1587614400000,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524","5S","5ZKN9B","steps","{\"activities-steps\":[{\"dateTime\":\"2020-04-23\",\"value\":\"7881\"}]"
+
+
+
+

Container

+

The container should be a CSV file per Fitbit sensor, each containing all participants’ data.

+

The script to connect and download data from this container is at: +

src/data/streams/fitbitjson_csv/container.R
+

+

Format

+

The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors. This file is at:

+
src/data/streams/fitbitjson_csv/format.yaml
+
+

If you want RAPIDS to process Fitbit sensor data using this stream, you will need to map DEVICE_ID and JSON_FITBIT_COLUMN to your own raw data columns inside each sensor section in format.yaml.

+
FITBIT_HEARTRATE_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
HEARTRATE_DAILY_RESTINGHRFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESOUTOFRANGEFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESFATBURNFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESCARDIOFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESPEAKFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_heartrate_summary_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-07”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1200.6102,”max”:88,”min”:31,”minutes”:1058,”name”:”Out of Range”},{“caloriesOut”:760.3020,”max”:120,”min”:86,”minutes”:366,”name”:”Fat Burn”},{“caloriesOut”:15.2048,”max”:146,”min”:120,”minutes”:2,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:72}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:68},{“time”:”00:01:00”,”value”:67},{“time”:”00:02:00”,”value”:67},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-08”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1100.1120,”max”:89,”min”:30,”minutes”:921,”name”:”Out of Range”},{“caloriesOut”:660.0012,”max”:118,”min”:82,”minutes”:361,”name”:”Fat Burn”},{“caloriesOut”:23.7088,”max”:142,”min”:108,”minutes”:3,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:70}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:77},{“time”:”00:01:00”,”value”:75},{“time”:”00:02:00”,”value”:73},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-09”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:750.3615,”max”:77,”min”:30,”minutes”:851,”name”:”Out of Range”},{“caloriesOut”:734.1516,”max”:107,”min”:77,”minutes”:550,”name”:”Fat Burn”},{“caloriesOut”:131.8579,”max”:130,”min”:107,”minutes”:29,”name”:”Cardio”},{“caloriesOut”:0,”max”:220,”min”:130,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:69}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:90},{“time”:”00:01:00”,”value”:89},{“time”:”00:02:00”,”value”:88},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_HEARTRATE_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
HEARTRATEFLAG_TO_MUTATE
HEARTRATE_ZONEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_heartrate_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-07”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1200.6102,”max”:88,”min”:31,”minutes”:1058,”name”:”Out of Range”},{“caloriesOut”:760.3020,”max”:120,”min”:86,”minutes”:366,”name”:”Fat Burn”},{“caloriesOut”:15.2048,”max”:146,”min”:120,”minutes”:2,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:72}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:68},{“time”:”00:01:00”,”value”:67},{“time”:”00:02:00”,”value”:67},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-08”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1100.1120,”max”:89,”min”:30,”minutes”:921,”name”:”Out of Range”},{“caloriesOut”:660.0012,”max”:118,”min”:82,”minutes”:361,”name”:”Fat Burn”},{“caloriesOut”:23.7088,”max”:142,”min”:108,”minutes”:3,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:70}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:77},{“time”:”00:01:00”,”value”:75},{“time”:”00:02:00”,”value”:73},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-09”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:750.3615,”max”:77,”min”:30,”minutes”:851,”name”:”Out of Range”},{“caloriesOut”:734.1516,”max”:107,”min”:77,”minutes”:550,”name”:”Fat Burn”},{“caloriesOut”:131.8579,”max”:130,”min”:107,”minutes”:29,”name”:”Cardio”},{“caloriesOut”:0,”max”:220,”min”:130,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:69}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:90},{“time”:”00:01:00”,”value”:89},{“time”:”00:02:00”,”value”:88},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_SLEEP_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
LOCAL_START_DATE_TIMEFLAG_TO_MUTATE
LOCAL_END_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
EFFICIENCYFLAG_TO_MUTATE
MINUTES_AFTER_WAKEUPFLAG_TO_MUTATE
MINUTES_ASLEEPFLAG_TO_MUTATE
MINUTES_AWAKEFLAG_TO_MUTATE
MINUTES_TO_FALL_ASLEEPFLAG_TO_MUTATE
MINUTES_IN_BEDFLAG_TO_MUTATE
IS_MAIN_SLEEPFLAG_TO_MUTATE
TYPEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_sleep_summary_json.py
    +- src/data/streams/mutations/fitbit/add_local_date_time.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1’s count_awake, duration_awake, count_awakenings, count_restless, and duration_restless columns.

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-10”,”duration”:3600000,”efficiency”:92,”endTime”:”2020-10-10T16:37:00.000”,”infoCode”:2,”isMainSleep”:false,”levels”:{“data”:[{“dateTime”:”2020-10-10T15:36:30.000”,”level”:”restless”,”seconds”:60},{“dateTime”:”2020-10-10T15:37:30.000”,”level”:”asleep”,”seconds”:660},{“dateTime”:”2020-10-10T15:48:30.000”,”level”:”restless”,”seconds”:60},…], “summary”:{“asleep”:{“count”:0,”minutes”:56},”awake”:{“count”:0,”minutes”:0},”restless”:{“count”:3,”minutes”:4}}},”logId”:26315914306,”minutesAfterWakeup”:0,”minutesAsleep”:55,”minutesAwake”:5,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T15:36:30.000”,”timeInBed”:60,”type”:”classic”},{“dateOfSleep”:”2020-10-10”,”duration”:22980000,”efficiency”:88,”endTime”:”2020-10-10T08:10:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-10T01:46:30.000”,”level”:”light”,”seconds”:420},{“dateTime”:”2020-10-10T01:53:30.000”,”level”:”deep”,”seconds”:1230},{“dateTime”:”2020-10-10T02:14:00.000”,”level”:”light”,”seconds”:360},…], “summary”:{“deep”:{“count”:3,”minutes”:92,”thirtyDayAvgMinutes”:0},”light”:{“count”:29,”minutes”:193,”thirtyDayAvgMinutes”:0},”rem”:{“count”:4,”minutes”:33,”thirtyDayAvgMinutes”:0},”wake”:{“count”:28,”minutes”:65,”thirtyDayAvgMinutes”:0}}},”logId”:26311786557,”minutesAfterWakeup”:0,”minutesAsleep”:318,”minutesAwake”:65,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T01:46:30.000”,”timeInBed”:383,”type”:”stages”}],”summary”:{“stages”:{“deep”:92,”light”:193,”rem”:33,”wake”:65},”totalMinutesAsleep”:373,”totalSleepRecords”:2,”totalTimeInBed”:443}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-11”,”duration”:41640000,”efficiency”:89,”endTime”:”2020-10-11T11:47:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-11T00:12:30.000”,”level”:”wake”,”seconds”:450},{“dateTime”:”2020-10-11T00:20:00.000”,”level”:”light”,”seconds”:870},{“dateTime”:”2020-10-11T00:34:30.000”,”level”:”wake”,”seconds”:780},…], “summary”:{“deep”:{“count”:4,”minutes”:52,”thirtyDayAvgMinutes”:62},”light”:{“count”:32,”minutes”:442,”thirtyDayAvgMinutes”:364},”rem”:{“count”:6,”minutes”:68,”thirtyDayAvgMinutes”:58},”wake”:{“count”:29,”minutes”:132,”thirtyDayAvgMinutes”:94}}},”logId”:26589710670,”minutesAfterWakeup”:1,”minutesAsleep”:562,”minutesAwake”:132,”minutesToFallAsleep”:0,”startTime”:”2020-10-11T00:12:30.000”,”timeInBed”:694,”type”:”stages”}],”summary”:{“stages”:{“deep”:52,”light”:442,”rem”:68,”wake”:132},”totalMinutesAsleep”:562,”totalSleepRecords”:1,”totalTimeInBed”:694}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-12”,”duration”:28980000,”efficiency”:93,”endTime”:”2020-10-12T09:34:30.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-12T01:31:00.000”,”level”:”wake”,”seconds”:600},{“dateTime”:”2020-10-12T01:41:00.000”,”level”:”light”,”seconds”:60},{“dateTime”:”2020-10-12T01:42:00.000”,”level”:”deep”,”seconds”:2340},…], “summary”:{“deep”:{“count”:4,”minutes”:63,”thirtyDayAvgMinutes”:59},”light”:{“count”:27,”minutes”:257,”thirtyDayAvgMinutes”:364},”rem”:{“count”:5,”minutes”:94,”thirtyDayAvgMinutes”:58},”wake”:{“count”:24,”minutes”:69,”thirtyDayAvgMinutes”:95}}},”logId”:26589710673,”minutesAfterWakeup”:0,”minutesAsleep”:415,”minutesAwake”:68,”minutesToFallAsleep”:0,”startTime”:”2020-10-12T01:31:00.000”,”timeInBed”:483,”type”:”stages”}],”summary”:{“stages”:{“deep”:63,”light”:257,”rem”:94,”wake”:69},”totalMinutesAsleep”:415,”totalSleepRecords”:1,”totalTimeInBed”:483}}
    +
    +
    +
  • +
+
+
FITBIT_SLEEP_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
TYPE_EPISODE_IDFLAG_TO_MUTATE
DURATIONFLAG_TO_MUTATE
IS_MAIN_SLEEPFLAG_TO_MUTATE
TYPEFLAG_TO_MUTATE
LEVELFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_sleep_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    Fitbit API has two versions for sleep data, v1 and v1.2; we support both.

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-10”,”duration”:3600000,”efficiency”:92,”endTime”:”2020-10-10T16:37:00.000”,”infoCode”:2,”isMainSleep”:false,”levels”:{“data”:[{“dateTime”:”2020-10-10T15:36:30.000”,”level”:”restless”,”seconds”:60},{“dateTime”:”2020-10-10T15:37:30.000”,”level”:”asleep”,”seconds”:660},{“dateTime”:”2020-10-10T15:48:30.000”,”level”:”restless”,”seconds”:60},…], “summary”:{“asleep”:{“count”:0,”minutes”:56},”awake”:{“count”:0,”minutes”:0},”restless”:{“count”:3,”minutes”:4}}},”logId”:26315914306,”minutesAfterWakeup”:0,”minutesAsleep”:55,”minutesAwake”:5,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T15:36:30.000”,”timeInBed”:60,”type”:”classic”},{“dateOfSleep”:”2020-10-10”,”duration”:22980000,”efficiency”:88,”endTime”:”2020-10-10T08:10:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-10T01:46:30.000”,”level”:”light”,”seconds”:420},{“dateTime”:”2020-10-10T01:53:30.000”,”level”:”deep”,”seconds”:1230},{“dateTime”:”2020-10-10T02:14:00.000”,”level”:”light”,”seconds”:360},…], “summary”:{“deep”:{“count”:3,”minutes”:92,”thirtyDayAvgMinutes”:0},”light”:{“count”:29,”minutes”:193,”thirtyDayAvgMinutes”:0},”rem”:{“count”:4,”minutes”:33,”thirtyDayAvgMinutes”:0},”wake”:{“count”:28,”minutes”:65,”thirtyDayAvgMinutes”:0}}},”logId”:26311786557,”minutesAfterWakeup”:0,”minutesAsleep”:318,”minutesAwake”:65,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T01:46:30.000”,”timeInBed”:383,”type”:”stages”}],”summary”:{“stages”:{“deep”:92,”light”:193,”rem”:33,”wake”:65},”totalMinutesAsleep”:373,”totalSleepRecords”:2,”totalTimeInBed”:443}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-11”,”duration”:41640000,”efficiency”:89,”endTime”:”2020-10-11T11:47:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-11T00:12:30.000”,”level”:”wake”,”seconds”:450},{“dateTime”:”2020-10-11T00:20:00.000”,”level”:”light”,”seconds”:870},{“dateTime”:”2020-10-11T00:34:30.000”,”level”:”wake”,”seconds”:780},…], “summary”:{“deep”:{“count”:4,”minutes”:52,”thirtyDayAvgMinutes”:62},”light”:{“count”:32,”minutes”:442,”thirtyDayAvgMinutes”:364},”rem”:{“count”:6,”minutes”:68,”thirtyDayAvgMinutes”:58},”wake”:{“count”:29,”minutes”:132,”thirtyDayAvgMinutes”:94}}},”logId”:26589710670,”minutesAfterWakeup”:1,”minutesAsleep”:562,”minutesAwake”:132,”minutesToFallAsleep”:0,”startTime”:”2020-10-11T00:12:30.000”,”timeInBed”:694,”type”:”stages”}],”summary”:{“stages”:{“deep”:52,”light”:442,”rem”:68,”wake”:132},”totalMinutesAsleep”:562,”totalSleepRecords”:1,”totalTimeInBed”:694}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-12”,”duration”:28980000,”efficiency”:93,”endTime”:”2020-10-12T09:34:30.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-12T01:31:00.000”,”level”:”wake”,”seconds”:600},{“dateTime”:”2020-10-12T01:41:00.000”,”level”:”light”,”seconds”:60},{“dateTime”:”2020-10-12T01:42:00.000”,”level”:”deep”,”seconds”:2340},…], “summary”:{“deep”:{“count”:4,”minutes”:63,”thirtyDayAvgMinutes”:59},”light”:{“count”:27,”minutes”:257,”thirtyDayAvgMinutes”:364},”rem”:{“count”:5,”minutes”:94,”thirtyDayAvgMinutes”:58},”wake”:{“count”:24,”minutes”:69,”thirtyDayAvgMinutes”:95}}},”logId”:26589710673,”minutesAfterWakeup”:0,”minutesAsleep”:415,”minutesAwake”:68,”minutesToFallAsleep”:0,”startTime”:”2020-10-12T01:31:00.000”,”timeInBed”:483,”type”:”stages”}],”summary”:{“stages”:{“deep”:63,”light”:257,”rem”:94,”wake”:69},”totalMinutesAsleep”:415,”totalSleepRecords”:1,”totalTimeInBed”:483}}
    +
    +
    +
  • +
+
+
FITBIT_STEPS_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMEFLAG_TO_MUTATE
STEPSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_steps_summary_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    TIMESTAMP, LOCAL_DATE_TIME, and STEPS are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-07”,”value”:”1775”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:5},{“time”:”00:01:00”,”value”:3},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-08”,”value”:”3201”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:14},{“time”:”00:01:00”,”value”:11},{“time”:”00:02:00”,”value”:10},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-09”,”value”:”998”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:0},{“time”:”00:01:00”,”value”:0},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_STEPS_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMEFLAG_TO_MUTATE
STEPSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_steps_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    TIMESTAMP, LOCAL_DATE_TIME, and STEPS are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-07”,”value”:”1775”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:5},{“time”:”00:01:00”,”value”:3},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-08”,”value”:”3201”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:14},{“time”:”00:01:00”,”value”:11},{“time”:”00:02:00”,”value”:10},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-09”,”value”:”998”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:0},{“time”:”00:01:00”,”value”:0},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/fitbitjson-mysql/index.html b/1.3/datastreams/fitbitjson-mysql/index.html new file mode 100644 index 00000000..363683f1 --- /dev/null +++ b/1.3/datastreams/fitbitjson-mysql/index.html @@ -0,0 +1,2507 @@ + + + + + + + + + + + + + + + + + + + + + + fitbitjson_mysql - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

fitbitjson_mysql

+

This data stream handles Fitbit sensor data downloaded using the Fitbit Web API and stored in a MySQL database. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. Once you have your sensor data in a MySQL database, RAPIDS can process it.

+

Container

+

The container should be a MySQL database with a table per sensor, each containing all participants’ data.

+

The script to connect and download data from this container is at: +

src/data/streams/fitbitjson_mysql/container.R
+

+

Format

+

The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors. This file is at:

+
src/data/streams/fitbitjson_mysql/format.yaml
+
+

If you want RAPIDS to process Fitbit sensor data using this stream, you will need to map DEVICE_ID and JSON_FITBIT_COLUMN to your own raw data columns inside each sensor section in format.yaml.

+
FITBIT_HEARTRATE_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
HEARTRATE_DAILY_RESTINGHRFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESOUTOFRANGEFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESFATBURNFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESCARDIOFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESPEAKFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_heartrate_summary_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-07”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1200.6102,”max”:88,”min”:31,”minutes”:1058,”name”:”Out of Range”},{“caloriesOut”:760.3020,”max”:120,”min”:86,”minutes”:366,”name”:”Fat Burn”},{“caloriesOut”:15.2048,”max”:146,”min”:120,”minutes”:2,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:72}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:68},{“time”:”00:01:00”,”value”:67},{“time”:”00:02:00”,”value”:67},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-08”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1100.1120,”max”:89,”min”:30,”minutes”:921,”name”:”Out of Range”},{“caloriesOut”:660.0012,”max”:118,”min”:82,”minutes”:361,”name”:”Fat Burn”},{“caloriesOut”:23.7088,”max”:142,”min”:108,”minutes”:3,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:70}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:77},{“time”:”00:01:00”,”value”:75},{“time”:”00:02:00”,”value”:73},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-09”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:750.3615,”max”:77,”min”:30,”minutes”:851,”name”:”Out of Range”},{“caloriesOut”:734.1516,”max”:107,”min”:77,”minutes”:550,”name”:”Fat Burn”},{“caloriesOut”:131.8579,”max”:130,”min”:107,”minutes”:29,”name”:”Cardio”},{“caloriesOut”:0,”max”:220,”min”:130,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:69}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:90},{“time”:”00:01:00”,”value”:89},{“time”:”00:02:00”,”value”:88},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_HEARTRATE_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
HEARTRATEFLAG_TO_MUTATE
HEARTRATE_ZONEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_heartrate_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-07”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1200.6102,”max”:88,”min”:31,”minutes”:1058,”name”:”Out of Range”},{“caloriesOut”:760.3020,”max”:120,”min”:86,”minutes”:366,”name”:”Fat Burn”},{“caloriesOut”:15.2048,”max”:146,”min”:120,”minutes”:2,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:72}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:68},{“time”:”00:01:00”,”value”:67},{“time”:”00:02:00”,”value”:67},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-08”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1100.1120,”max”:89,”min”:30,”minutes”:921,”name”:”Out of Range”},{“caloriesOut”:660.0012,”max”:118,”min”:82,”minutes”:361,”name”:”Fat Burn”},{“caloriesOut”:23.7088,”max”:142,”min”:108,”minutes”:3,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:70}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:77},{“time”:”00:01:00”,”value”:75},{“time”:”00:02:00”,”value”:73},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-09”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:750.3615,”max”:77,”min”:30,”minutes”:851,”name”:”Out of Range”},{“caloriesOut”:734.1516,”max”:107,”min”:77,”minutes”:550,”name”:”Fat Burn”},{“caloriesOut”:131.8579,”max”:130,”min”:107,”minutes”:29,”name”:”Cardio”},{“caloriesOut”:0,”max”:220,”min”:130,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:69}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:90},{“time”:”00:01:00”,”value”:89},{“time”:”00:02:00”,”value”:88},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_SLEEP_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
LOCAL_START_DATE_TIMEFLAG_TO_MUTATE
LOCAL_END_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
EFFICIENCYFLAG_TO_MUTATE
MINUTES_AFTER_WAKEUPFLAG_TO_MUTATE
MINUTES_ASLEEPFLAG_TO_MUTATE
MINUTES_AWAKEFLAG_TO_MUTATE
MINUTES_TO_FALL_ASLEEPFLAG_TO_MUTATE
MINUTES_IN_BEDFLAG_TO_MUTATE
IS_MAIN_SLEEPFLAG_TO_MUTATE
TYPEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_sleep_summary_json.py
    +- src/data/streams/mutations/fitbit/add_local_date_time.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1’s count_awake, duration_awake, count_awakenings, count_restless, and duration_restless columns.

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-10”,”duration”:3600000,”efficiency”:92,”endTime”:”2020-10-10T16:37:00.000”,”infoCode”:2,”isMainSleep”:false,”levels”:{“data”:[{“dateTime”:”2020-10-10T15:36:30.000”,”level”:”restless”,”seconds”:60},{“dateTime”:”2020-10-10T15:37:30.000”,”level”:”asleep”,”seconds”:660},{“dateTime”:”2020-10-10T15:48:30.000”,”level”:”restless”,”seconds”:60},…], “summary”:{“asleep”:{“count”:0,”minutes”:56},”awake”:{“count”:0,”minutes”:0},”restless”:{“count”:3,”minutes”:4}}},”logId”:26315914306,”minutesAfterWakeup”:0,”minutesAsleep”:55,”minutesAwake”:5,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T15:36:30.000”,”timeInBed”:60,”type”:”classic”},{“dateOfSleep”:”2020-10-10”,”duration”:22980000,”efficiency”:88,”endTime”:”2020-10-10T08:10:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-10T01:46:30.000”,”level”:”light”,”seconds”:420},{“dateTime”:”2020-10-10T01:53:30.000”,”level”:”deep”,”seconds”:1230},{“dateTime”:”2020-10-10T02:14:00.000”,”level”:”light”,”seconds”:360},…], “summary”:{“deep”:{“count”:3,”minutes”:92,”thirtyDayAvgMinutes”:0},”light”:{“count”:29,”minutes”:193,”thirtyDayAvgMinutes”:0},”rem”:{“count”:4,”minutes”:33,”thirtyDayAvgMinutes”:0},”wake”:{“count”:28,”minutes”:65,”thirtyDayAvgMinutes”:0}}},”logId”:26311786557,”minutesAfterWakeup”:0,”minutesAsleep”:318,”minutesAwake”:65,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T01:46:30.000”,”timeInBed”:383,”type”:”stages”}],”summary”:{“stages”:{“deep”:92,”light”:193,”rem”:33,”wake”:65},”totalMinutesAsleep”:373,”totalSleepRecords”:2,”totalTimeInBed”:443}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-11”,”duration”:41640000,”efficiency”:89,”endTime”:”2020-10-11T11:47:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-11T00:12:30.000”,”level”:”wake”,”seconds”:450},{“dateTime”:”2020-10-11T00:20:00.000”,”level”:”light”,”seconds”:870},{“dateTime”:”2020-10-11T00:34:30.000”,”level”:”wake”,”seconds”:780},…], “summary”:{“deep”:{“count”:4,”minutes”:52,”thirtyDayAvgMinutes”:62},”light”:{“count”:32,”minutes”:442,”thirtyDayAvgMinutes”:364},”rem”:{“count”:6,”minutes”:68,”thirtyDayAvgMinutes”:58},”wake”:{“count”:29,”minutes”:132,”thirtyDayAvgMinutes”:94}}},”logId”:26589710670,”minutesAfterWakeup”:1,”minutesAsleep”:562,”minutesAwake”:132,”minutesToFallAsleep”:0,”startTime”:”2020-10-11T00:12:30.000”,”timeInBed”:694,”type”:”stages”}],”summary”:{“stages”:{“deep”:52,”light”:442,”rem”:68,”wake”:132},”totalMinutesAsleep”:562,”totalSleepRecords”:1,”totalTimeInBed”:694}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-12”,”duration”:28980000,”efficiency”:93,”endTime”:”2020-10-12T09:34:30.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-12T01:31:00.000”,”level”:”wake”,”seconds”:600},{“dateTime”:”2020-10-12T01:41:00.000”,”level”:”light”,”seconds”:60},{“dateTime”:”2020-10-12T01:42:00.000”,”level”:”deep”,”seconds”:2340},…], “summary”:{“deep”:{“count”:4,”minutes”:63,”thirtyDayAvgMinutes”:59},”light”:{“count”:27,”minutes”:257,”thirtyDayAvgMinutes”:364},”rem”:{“count”:5,”minutes”:94,”thirtyDayAvgMinutes”:58},”wake”:{“count”:24,”minutes”:69,”thirtyDayAvgMinutes”:95}}},”logId”:26589710673,”minutesAfterWakeup”:0,”minutesAsleep”:415,”minutesAwake”:68,”minutesToFallAsleep”:0,”startTime”:”2020-10-12T01:31:00.000”,”timeInBed”:483,”type”:”stages”}],”summary”:{“stages”:{“deep”:63,”light”:257,”rem”:94,”wake”:69},”totalMinutesAsleep”:415,”totalSleepRecords”:1,”totalTimeInBed”:483}}
    +
    +
    +
  • +
+
+
FITBIT_SLEEP_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
TYPE_EPISODE_IDFLAG_TO_MUTATE
DURATIONFLAG_TO_MUTATE
IS_MAIN_SLEEPFLAG_TO_MUTATE
TYPEFLAG_TO_MUTATE
LEVELFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_sleep_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    The Fitbit API has two versions of sleep data, v1 and v1.2; we support both.

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-10”,”duration”:3600000,”efficiency”:92,”endTime”:”2020-10-10T16:37:00.000”,”infoCode”:2,”isMainSleep”:false,”levels”:{“data”:[{“dateTime”:”2020-10-10T15:36:30.000”,”level”:”restless”,”seconds”:60},{“dateTime”:”2020-10-10T15:37:30.000”,”level”:”asleep”,”seconds”:660},{“dateTime”:”2020-10-10T15:48:30.000”,”level”:”restless”,”seconds”:60},…], “summary”:{“asleep”:{“count”:0,”minutes”:56},”awake”:{“count”:0,”minutes”:0},”restless”:{“count”:3,”minutes”:4}}},”logId”:26315914306,”minutesAfterWakeup”:0,”minutesAsleep”:55,”minutesAwake”:5,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T15:36:30.000”,”timeInBed”:60,”type”:”classic”},{“dateOfSleep”:”2020-10-10”,”duration”:22980000,”efficiency”:88,”endTime”:”2020-10-10T08:10:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-10T01:46:30.000”,”level”:”light”,”seconds”:420},{“dateTime”:”2020-10-10T01:53:30.000”,”level”:”deep”,”seconds”:1230},{“dateTime”:”2020-10-10T02:14:00.000”,”level”:”light”,”seconds”:360},…], “summary”:{“deep”:{“count”:3,”minutes”:92,”thirtyDayAvgMinutes”:0},”light”:{“count”:29,”minutes”:193,”thirtyDayAvgMinutes”:0},”rem”:{“count”:4,”minutes”:33,”thirtyDayAvgMinutes”:0},”wake”:{“count”:28,”minutes”:65,”thirtyDayAvgMinutes”:0}}},”logId”:26311786557,”minutesAfterWakeup”:0,”minutesAsleep”:318,”minutesAwake”:65,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T01:46:30.000”,”timeInBed”:383,”type”:”stages”}],”summary”:{“stages”:{“deep”:92,”light”:193,”rem”:33,”wake”:65},”totalMinutesAsleep”:373,”totalSleepRecords”:2,”totalTimeInBed”:443}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-11”,”duration”:41640000,”efficiency”:89,”endTime”:”2020-10-11T11:47:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-11T00:12:30.000”,”level”:”wake”,”seconds”:450},{“dateTime”:”2020-10-11T00:20:00.000”,”level”:”light”,”seconds”:870},{“dateTime”:”2020-10-11T00:34:30.000”,”level”:”wake”,”seconds”:780},…], “summary”:{“deep”:{“count”:4,”minutes”:52,”thirtyDayAvgMinutes”:62},”light”:{“count”:32,”minutes”:442,”thirtyDayAvgMinutes”:364},”rem”:{“count”:6,”minutes”:68,”thirtyDayAvgMinutes”:58},”wake”:{“count”:29,”minutes”:132,”thirtyDayAvgMinutes”:94}}},”logId”:26589710670,”minutesAfterWakeup”:1,”minutesAsleep”:562,”minutesAwake”:132,”minutesToFallAsleep”:0,”startTime”:”2020-10-11T00:12:30.000”,”timeInBed”:694,”type”:”stages”}],”summary”:{“stages”:{“deep”:52,”light”:442,”rem”:68,”wake”:132},”totalMinutesAsleep”:562,”totalSleepRecords”:1,”totalTimeInBed”:694}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-12”,”duration”:28980000,”efficiency”:93,”endTime”:”2020-10-12T09:34:30.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-12T01:31:00.000”,”level”:”wake”,”seconds”:600},{“dateTime”:”2020-10-12T01:41:00.000”,”level”:”light”,”seconds”:60},{“dateTime”:”2020-10-12T01:42:00.000”,”level”:”deep”,”seconds”:2340},…], “summary”:{“deep”:{“count”:4,”minutes”:63,”thirtyDayAvgMinutes”:59},”light”:{“count”:27,”minutes”:257,”thirtyDayAvgMinutes”:364},”rem”:{“count”:5,”minutes”:94,”thirtyDayAvgMinutes”:58},”wake”:{“count”:24,”minutes”:69,”thirtyDayAvgMinutes”:95}}},”logId”:26589710673,”minutesAfterWakeup”:0,”minutesAsleep”:415,”minutesAwake”:68,”minutesToFallAsleep”:0,”startTime”:”2020-10-12T01:31:00.000”,”timeInBed”:483,”type”:”stages”}],”summary”:{“stages”:{“deep”:63,”light”:257,”rem”:94,”wake”:69},”totalMinutesAsleep”:415,”totalSleepRecords”:1,”totalTimeInBed”:483}}
    +
    +
    +
  • +
+
+
FITBIT_STEPS_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMEFLAG_TO_MUTATE
STEPSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_steps_summary_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    TIMESTAMP, LOCAL_DATE_TIME, and STEPS are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-07”,”value”:”1775”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:5},{“time”:”00:01:00”,”value”:3},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-08”,”value”:”3201”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:14},{“time”:”00:01:00”,”value”:11},{“time”:”00:02:00”,”value”:10},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-09”,”value”:”998”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:0},{“time”:”00:01:00”,”value”:0},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_STEPS_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMEFLAG_TO_MUTATE
STEPSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_steps_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    TIMESTAMP, LOCAL_DATE_TIME, and STEPS are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-07”,”value”:”1775”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:5},{“time”:”00:01:00”,”value”:3},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-08”,”value”:”3201”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:14},{“time”:”00:01:00”,”value”:11},{“time”:”00:02:00”,”value”:10},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-09”,”value”:”998”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:0},{“time”:”00:01:00”,”value”:0},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/fitbitparsed-csv/index.html b/1.3/datastreams/fitbitparsed-csv/index.html new file mode 100644 index 00000000..e6b01e52 --- /dev/null +++ b/1.3/datastreams/fitbitparsed-csv/index.html @@ -0,0 +1,2573 @@ + + + + + + + + + + + + + + + + + + + + + + fitbitparsed_csv - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

fitbitparsed_csv

+

This data stream handles Fitbit sensor data downloaded using the Fitbit Web API, parsed, and stored in a CSV file. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. Once you have your parsed sensor data in a CSV file, RAPIDS can process it.

+
+

What is the difference between JSON and plain data streams

+

Most people will only need fitbitjson_* because they downloaded and stored their data directly from Fitbit’s API. However, if, for some reason, you don’t have access to that JSON data and instead only have the parsed data (columns and rows), you can use this data stream.

+
+
+

Warning

+

The CSV files have to use , as separator, \ as escape character (do not escape " with ""), and wrap any string columns with ".

+
Example of a valid CSV file
"device_id","heartrate","heartrate_zone","local_date_time","timestamp"
+"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",69,"outofrange","2020-04-23 00:00:00",0
+"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",69,"outofrange","2020-04-23 00:01:00",0
+"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",67,"outofrange","2020-04-23 00:02:00",0
+"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",69,"outofrange","2020-04-23 00:03:00",0
+
+
+
+

Container

+

The container should be a CSV file per sensor, each containing all participants’ data.

+

The script to connect and download data from this container is at: +

src/data/streams/fitbitparsed_csv/container.R
+

+

Format

+

The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors. This file is at:

+
src/data/streams/fitbitparsed_csv/format.yaml
+
+

If you want to use this stream with your data, modify every sensor in format.yaml to map all columns except TIMESTAMP in [RAPIDS_COLUMN_MAPPINGS] to your raw data column names.

+

All columns are mandatory; however, all except device_id and local_date_time can be empty if you don’t have that data. Just keep in mind that some features will be empty if some of these columns are empty.

+
FITBIT_HEARTRATE_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
HEARTRATE_DAILY_RESTINGHRheartrate_daily_restinghr
HEARTRATE_DAILY_CALORIESOUTOFRANGEheartrate_daily_caloriesoutofrange
HEARTRATE_DAILY_CALORIESFATBURNheartrate_daily_caloriesfatburn
HEARTRATE_DAILY_CALORIESCARDIOheartrate_daily_caloriescardio
HEARTRATE_DAILY_CALORIESPEAKheartrate_daily_caloriespeak
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timeheartrate_daily_restinghrheartrate_daily_caloriesoutofrangeheartrate_daily_caloriesfatburnheartrate_daily_caloriescardioheartrate_daily_caloriespeak
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07721200.6102760.302015.20480
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-08701100.1120660.001223.70880
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-0969750.3615734.1516131.85790
+
+
+
+
FITBIT_HEARTRATE_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
HEARTRATEheartrate
HEARTRATE_ZONEheartrate_zone
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timeheartrateheartrate_zone
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:00:0068outofrange
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:01:0067outofrange
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:02:0067outofrange
+
+
+
+
FITBIT_SLEEP_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
LOCAL_START_DATE_TIMElocal_start_date_time
LOCAL_END_DATE_TIMElocal_end_date_time
DEVICE_IDdevice_id
EFFICIENCYefficiency
MINUTES_AFTER_WAKEUPminutes_after_wakeup
MINUTES_ASLEEPminutes_asleep
MINUTES_AWAKEminutes_awake
MINUTES_TO_FALL_ASLEEPminutes_to_fall_asleep
MINUTES_IN_BEDminutes_in_bed
IS_MAIN_SLEEPis_main_sleep
TYPEtype
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/add_local_date_time.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+

The Fitbit API has two versions of sleep data, v1 and v1.2. We support both but ignore v1’s count_awake, duration_awake, count_awakenings, count_restless, and duration_restless columns.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_start_date_timelocal_end_date_timeefficiencyminutes_after_wakeupminutes_asleepminutes_awakeminutes_to_fall_asleepminutes_in_bedis_main_sleeptype
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-10 15:36:302020-10-10 16:37:009205550600classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-10 01:46:302020-10-10 08:10:008803186503831stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-11 00:12:302020-10-11 11:47:0089156213206941stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-12 01:31:002020-10-12 09:34:309304156804831stages
+
+
+
+
FITBIT_SLEEP_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
TYPE_EPISODE_IDtype_episode_id
DURATIONduration
IS_MAIN_SLEEPis_main_sleep
TYPEtype
LEVELlevel
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+

The Fitbit API has two versions of sleep data, v1 and v1.2; we support both.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idtype_episode_idlocal_date_timedurationlevelis_main_sleeptype
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:36:3060restless0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:37:30660asleep0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:48:3060restless0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba524
a748ee1a-1d0b-4ae9-9074-279a2b6ba52412020-10-10 01:46:30420light1stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba52412020-10-10 01:53:301230deep1stages
+
+
+
+
FITBIT_STEPS_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMElocal_date_time
STEPSsteps
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timesteps
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-071775
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-083201
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-09998
+
+
+
+
FITBIT_STEPS_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMElocal_date_time
STEPSsteps
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timesteps
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:00:005
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:01:003
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:02:000
+
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/fitbitparsed-mysql/index.html b/1.3/datastreams/fitbitparsed-mysql/index.html new file mode 100644 index 00000000..c598edb2 --- /dev/null +++ b/1.3/datastreams/fitbitparsed-mysql/index.html @@ -0,0 +1,2562 @@ + + + + + + + + + + + + + + + + + + + + + + fitbitparsed_mysql - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

fitbitparsed_mysql

+

This data stream handles Fitbit sensor data downloaded using the Fitbit Web API, parsed, and stored in a MySQL database. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. Once you have your parsed sensor data in a MySQL database, RAPIDS can process it.

+
+

What is the difference between JSON and plain data streams

+

Most people will only need fitbitjson_* because they downloaded and stored their data directly from Fitbit’s API. However, if, for some reason, you don’t have access to that JSON data and instead only have the parsed data (columns and rows), you can use this data stream.

+
+

Container

+

The container should be a MySQL database with a table per sensor, each containing all participants’ data.

+

The script to connect and download data from this container is at: +

src/data/streams/fitbitparsed_mysql/container.R
+

+

Format

+

The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors. This file is at:

+
src/data/streams/fitbitparsed_mysql/format.yaml
+
+

If you want to use this stream with your data, modify every sensor in format.yaml to map all columns except TIMESTAMP in [RAPIDS_COLUMN_MAPPINGS] to your raw data column names.

+

All columns are mandatory; however, all except device_id and local_date_time can be empty if you don’t have that data. Just keep in mind that some features will be empty if some of these columns are empty.

+
FITBIT_HEARTRATE_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
HEARTRATE_DAILY_RESTINGHRheartrate_daily_restinghr
HEARTRATE_DAILY_CALORIESOUTOFRANGEheartrate_daily_caloriesoutofrange
HEARTRATE_DAILY_CALORIESFATBURNheartrate_daily_caloriesfatburn
HEARTRATE_DAILY_CALORIESCARDIOheartrate_daily_caloriescardio
HEARTRATE_DAILY_CALORIESPEAKheartrate_daily_caloriespeak
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timeheartrate_daily_restinghrheartrate_daily_caloriesoutofrangeheartrate_daily_caloriesfatburnheartrate_daily_caloriescardioheartrate_daily_caloriespeak
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07721200.6102760.302015.20480
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-08701100.1120660.001223.70880
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-0969750.3615734.1516131.85790
+
+
+
+
FITBIT_HEARTRATE_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
HEARTRATEheartrate
HEARTRATE_ZONEheartrate_zone
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timeheartrateheartrate_zone
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:00:0068outofrange
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:01:0067outofrange
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:02:0067outofrange
+
+
+
+
FITBIT_SLEEP_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
LOCAL_START_DATE_TIMElocal_start_date_time
LOCAL_END_DATE_TIMElocal_end_date_time
DEVICE_IDdevice_id
EFFICIENCYefficiency
MINUTES_AFTER_WAKEUPminutes_after_wakeup
MINUTES_ASLEEPminutes_asleep
MINUTES_AWAKEminutes_awake
MINUTES_TO_FALL_ASLEEPminutes_to_fall_asleep
MINUTES_IN_BEDminutes_in_bed
IS_MAIN_SLEEPis_main_sleep
TYPEtype
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/add_local_date_time.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+

The Fitbit API has two versions of sleep data, v1 and v1.2. We support both but ignore v1’s count_awake, duration_awake, count_awakenings, count_restless, and duration_restless columns.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_start_date_timelocal_end_date_timeefficiencyminutes_after_wakeupminutes_asleepminutes_awakeminutes_to_fall_asleepminutes_in_bedis_main_sleeptype
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-10 15:36:302020-10-10 16:37:009205550600classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-10 01:46:302020-10-10 08:10:008803186503831stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-11 00:12:302020-10-11 11:47:0089156213206941stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-12 01:31:002020-10-12 09:34:309304156804831stages
+
+
+
+
FITBIT_SLEEP_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
TYPE_EPISODE_IDtype_episode_id
DURATIONduration
IS_MAIN_SLEEPis_main_sleep
TYPEtype
LEVELlevel
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+

The Fitbit API has two versions of sleep data, v1 and v1.2; we support both.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idtype_episode_idlocal_date_timedurationlevelis_main_sleeptype
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:36:3060restless0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:37:30660asleep0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:48:3060restless0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba524
a748ee1a-1d0b-4ae9-9074-279a2b6ba52412020-10-10 01:46:30420light1stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba52412020-10-10 01:53:301230deep1stages
+
+
+
+
FITBIT_STEPS_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMElocal_date_time
STEPSsteps
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timesteps
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-071775
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-083201
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-09998
+
+
+
+
FITBIT_STEPS_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMElocal_date_time
STEPSsteps
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timesteps
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:00:005
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:01:003
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:02:000
+
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/mandatory-empatica-format/index.html b/1.3/datastreams/mandatory-empatica-format/index.html new file mode 100644 index 00000000..9010f893 --- /dev/null +++ b/1.3/datastreams/mandatory-empatica-format/index.html @@ -0,0 +1,2054 @@ + + + + + + + + + + + + + + + + + + + + + + Mandatory Empatica Format - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Mandatory Empatica Format

+

This is a description of the format RAPIDS needs to process data for the following Empatica sensors.

+
EMPATICA_ACCELEROMETER + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPA UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
DOUBLE_VALUES_0x axis of acceleration
DOUBLE_VALUES_1y axis of acceleration
DOUBLE_VALUES_2z axis of acceleration
+
+
EMPATICA_HEARTRATE + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPA UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
DEVICE_IDA string that uniquely identifies a device
HEARTRATEIntraday heartrate
+
+
EMPATICA_TEMPERATURE + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPA UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
DEVICE_IDA string that uniquely identifies a device
TEMPERATUREtemperature
+
+
EMPATICA_ELECTRODERMAL_ACTIVITY + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPA UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
DEVICE_IDA string that uniquely identifies a device
ELECTRODERMAL_ACTIVITYelectrical conductance
+
+
EMPATICA_BLOOD_VOLUME_PULSE + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPA UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
DEVICE_IDA string that uniquely identifies a device
BLOOD_VOLUME_PULSEblood volume pulse
+
+
EMPATICA_INTER_BEAT_INTERVAL + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPA UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
DEVICE_IDA string that uniquely identifies a device
INTER_BEAT_INTERVALinter beat interval
+
+
EMPATICA_TAGS + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPA UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
DEVICE_IDA string that uniquely identifies a device
TAGStags
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/mandatory-fitbit-format/index.html b/1.3/datastreams/mandatory-fitbit-format/index.html new file mode 100644 index 00000000..1a06c878 --- /dev/null +++ b/1.3/datastreams/mandatory-fitbit-format/index.html @@ -0,0 +1,2119 @@ + + + + + + + + + + + + + + + + + + + + + + Mandatory Fitbit Format - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Mandatory Fitbit Format

+

This is a description of the format RAPIDS needs to process data for the following Fitbit sensors.

+
FITBIT_HEARTRATE_SUMMARY + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
LOCAL_DATE_TIMEDate time string with format yyyy-mm-dd hh:mm:ss
DEVICE_IDA string that uniquely identifies a device
HEARTRATE_DAILY_RESTINGHRDaily resting heartrate
HEARTRATE_DAILY_CALORIESOUTOFRANGECalories spent while heartrate was outside a heartrate zone
HEARTRATE_DAILY_CALORIESFATBURNCalories spent while heartrate was inside the fat burn zone
HEARTRATE_DAILY_CALORIESCARDIOCalories spent while heartrate was inside the cardio zone
HEARTRATE_DAILY_CALORIESPEAKCalories spent while heartrate was inside the peak zone
+
+
FITBIT_HEARTRATE_INTRADAY + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
LOCAL_DATE_TIMEDate time string with format yyyy-mm-dd hh:mm:ss
DEVICE_IDA string that uniquely identifies a device
HEARTRATEIntraday heartrate
HEARTRATE_ZONEHeartrate zone that HEARTRATE belongs to. It is based on the heartrate zone ranges of each device
+
+
FITBIT_SLEEP_SUMMARY + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
LOCAL_DATE_TIMEDate time string with format yyyy-mm-dd 00:00:00, the date is the same as the start date of a daily sleep episode if its time is after SLEEP_SUMMARY_LAST_NIGHT_END, otherwise it is the day before the start date of that sleep episode
LOCAL_START_DATE_TIMEDate time string with format yyyy-mm-dd hh:mm:ss representing the start of a daily sleep episode
LOCAL_END_DATE_TIMEDate time string with format yyyy-mm-dd hh:mm:ss representing the end of a daily sleep episode
DEVICE_IDA string that uniquely identifies a device
EFFICIENCYSleep efficiency computed by fitbit as time asleep / (total time in bed - time to fall asleep)
MINUTES_AFTER_WAKEUPMinutes the participant spent in bed after waking up
MINUTES_ASLEEPMinutes the participant was asleep
MINUTES_AWAKEMinutes the participant was awake
MINUTES_TO_FALL_ASLEEPMinutes the participant spent in bed before falling asleep
MINUTES_IN_BEDMinutes the participant spent in bed across the sleep episode
IS_MAIN_SLEEP0 if this episode is a nap, or 1 if it is a main sleep episode
TYPEstages or classic sleep data
+
+
FITBIT_SLEEP_INTRADAY + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
LOCAL_DATE_TIMEDate time string with format yyyy-mm-dd hh:mm:ss, this either is a copy of LOCAL_START_DATE_TIME or LOCAL_END_DATE_TIME depending on which column is used to assign an episode to a specific day
DEVICE_IDA string that uniquely identifies a device
TYPE_EPISODE_IDAn id for each unique main or nap episode. Main and nap episodes have different levels, each row in this table is one of such levels, so multiple rows can have the same TYPE_EPISODE_ID
DURATIONDuration of the episode level in minutes
IS_MAIN_SLEEP0 if this episode level belongs to a nap, or 1 if it belongs to a main sleep episode
TYPEtype of level: stages or classic sleep data
LEVELFor stages levels one of wake, deep, light, or rem. For classic levels one of awake, restless, and asleep
+
+
FITBIT_STEPS_SUMMARY + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
LOCAL_DATE_TIMEDate time string with format yyyy-mm-dd hh:mm:ss
DEVICE_IDA string that uniquely identifies a device
STEPSDaily step count
+
+
FITBIT_STEPS_INTRADAY + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS)
LOCAL_DATE_TIMEDate time string with format yyyy-mm-dd hh:mm:ss
DEVICE_IDA string that uniquely identifies a device
STEPSIntraday step count (usually every minute)
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/datastreams/mandatory-phone-format/index.html b/1.3/datastreams/mandatory-phone-format/index.html new file mode 100644 index 00000000..35a5dfd0 --- /dev/null +++ b/1.3/datastreams/mandatory-phone-format/index.html @@ -0,0 +1,2453 @@ + + + + + + + + + + + + + + + + + + + + + + Mandatory Phone Format - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Mandatory Phone Format

+

This is a description of the format RAPIDS needs to process data for the following PHONE sensors.

+

See examples in the CSV files inside rapids_example_csv.zip

+
PHONE_ACCELEROMETER + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
DOUBLE_VALUES_0x axis of acceleration
DOUBLE_VALUES_1y axis of acceleration
DOUBLE_VALUES_2z axis of acceleration
+
+
PHONE_ACTIVITY_RECOGNITION + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
ACTIVITY_NAMEA string that denotes the current activity name: in_vehicle, on_bicycle, on_foot, still, unknown, tilting, walking or running
ACTIVITY_TYPEAn integer (ranged from 0 to 8) that denotes current activity type
CONFIDENCEAn integer (ranged from 0 to 100) that denotes the prediction accuracy
+
+
PHONE_APPLICATIONS_CRASHES + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
PACKAGE_NAMEApplication’s package name
APPLICATION_NAMEApplication’s localized name
APPLICATION_VERSIONApplication’s version code
ERROR_SHORTShort description of the error
ERROR_LONGMore verbose version of the error description
ERROR_CONDITION1 = code error; 2 = non-responsive (ANR error)
IS_SYSTEM_APPDevice’s pre-installed application
+
+
PHONE_APPLICATIONS_FOREGROUND + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
PACKAGE_NAMEApplication’s package name
APPLICATION_NAMEApplication’s localized name
IS_SYSTEM_APPDevice’s pre-installed application
+
+
PHONE_APPLICATIONS_NOTIFICATIONS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
PACKAGE_NAMEApplication’s package name
APPLICATION_NAMEApplication’s localized name
TEXTNotification’s header text, not the content
SOUNDNotification’s sound source (if applicable)
VIBRATENotification’s vibration pattern (if applicable)
DEFAULTSIf notification was delivered according to device’s default settings
FLAGSAn integer that denotes Android notification flag
+
+
PHONE_BATTERY + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
BATTERY_STATUSAn integer that denotes battery status: 0 or 1 = unknown, 2 = charging, 3 = discharging, 4 = not charging, 5 = full
BATTERY_LEVELAn integer that denotes battery level, between 0 and BATTERY_SCALE
BATTERY_SCALEAn integer that denotes the maximum battery level
+
+
PHONE_BLUETOOTH + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
BT_ADDRESSMAC address of the device’s Bluetooth sensor
BT_NAMEUser assigned name of the device’s Bluetooth sensor
BT_RSSIThe RSSI dB to the scanned device
+
+
PHONE_CALLS + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
CALL_TYPEAn integer that denotes call type: 1 = incoming, 2 = outgoing, 3 = missed
CALL_DURATIONLength of the call session
TRACESHA-1 one-way source/target of the call
+
+
PHONE_CONVERSATION + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
DOUBLE_ENERGYA number that denotes the amplitude of an audio sample (L2-norm of the audio frame)
INFERENCEAn integer (ranged from 0 to 3) that denotes the type of an audio sample: 0 = silence, 1 = noise, 2 = voice, 3 = unknown
DOUBLE_CONVO_STARTUNIX timestamp (13 digits) of the beginning of a conversation
DOUBLE_CONVO_ENDUNIX timestamp (13 digits) of the end of a conversation
+
+
PHONE_KEYBOARD + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
PACKAGE_NAMEThe application’s package name of keyboard interaction
BEFORE_TEXTThe previous keyboard input (empty if password)
CURRENT_TEXTThe current keyboard input (empty if password)
IS_PASSWORDAn integer: 0 = not password; 1 = password
+
+
PHONE_LIGHT + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
DOUBLE_LIGHT_LUXThe ambient luminance in lux units
ACCURACYAn integer that denotes the sensor’s accuracy level: 3 = maximum accuracy, 2 = medium accuracy, 1 = low accuracy
+
+
PHONE_LOCATIONS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
DOUBLE_LATITUDEThe location’s latitude, in degrees
DOUBLE_LONGITUDEThe location’s longitude, in degrees
DOUBLE_BEARINGThe location’s bearing, in degrees
DOUBLE_SPEEDThe speed if available, in meters/second over ground
DOUBLE_ALTITUDEThe altitude if available, in meters above sea level
PROVIDERA string that denotes the provider: gps, fused or network
ACCURACYThe estimated location accuracy
+
+
PHONE_LOG + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
LOG_MESSAGEA string that denotes log message
+
+
PHONE_MESSAGES + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
MESSAGE_TYPEAn integer that denotes message type: 1 = received, 2 = sent
TRACESHA-1 one-way source/target of the message
+
+
PHONE_SCREEN + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
SCREEN_STATUSAn integer that denotes screen status: 0 = off, 1 = on, 2 = locked, 3 = unlocked
+
+
PHONE_WIFI_CONNECTED + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
MAC_ADDRESSDevice’s MAC address
SSIDCurrently connected access point network name
BSSIDCurrently connected access point MAC address
+
+
PHONE_WIFI_VISIBLE + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnDescription
TIMESTAMPAn UNIX timestamp (13 digits) when a row of data was logged
DEVICE_IDA string that uniquely identifies a device
SSIDDetected access point network name
BSSIDDetected access point MAC address
SECURITYActive security protocols
FREQUENCYWi-Fi band frequency (e.g., 2427, 5180), in Hz
RSSIRSSI dB to the scanned device
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/developers/documentation/index.html b/1.3/developers/documentation/index.html new file mode 100644 index 00000000..7ad8360d --- /dev/null +++ b/1.3/developers/documentation/index.html @@ -0,0 +1,2012 @@ + + + + + + + + + + + + + + + + + + + + + + Documentation - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Documentation

+

We use mkdocs with the material theme to write these docs. Whenever you make any changes, just push them back to the repo and the documentation will be deployed automatically.

+

Set up development environment

+
    +
  1. Make sure your conda environment is active
  2. +
  3. pip install mkdocs
  4. +
  5. pip install mkdocs-material
  6. +
+

Preview

+

Run the following command in RAPIDS root folder and go to http://127.0.0.1:8000:

+
mkdocs serve
+
+

File Structure

+

The documentation config file is /mkdocs.yml, if you are adding new .md files to the docs modify the nav attribute at the bottom of that file. You can use the hierarchy there to find all the files that appear in the documentation.

+

Reference

+

Check this page to get familiar with the different visual elements we can use in the docs (admonitions, code blocks, tables, etc.) You can also refer to /docs/setup/installation.md and /docs/setup/configuration.md to see practical examples of these elements.

+
+

Hint

+

Any links to internal pages should be relative to the current page. For example, any link from this page (documentation) which is inside ./developers should begin with ../ to go one folder level up like: +

[mylink](../setup/installation.md)
+

+
+

Extras

+

You can insert emojis using this syntax :[SOURCE]-[ICON_NAME] from the following sources:

+ +

You can use this page to create markdown tables more easily

+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/developers/git-flow/index.html b/1.3/developers/git-flow/index.html new file mode 100644 index 00000000..af0b4432 --- /dev/null +++ b/1.3/developers/git-flow/index.html @@ -0,0 +1,2100 @@ + + + + + + + + + + + + + + + + + + + + + + Git Flow - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Git Flow

+

We use the develop/master variation of the OneFlow git flow

+

Add New Features

+

We use feature (topic) branches to implement new features

+
+

You are an internal developer if you have writing permissions to the repository.

+

Most feature branches are never pushed to the repo, only do so if you expect that its development will take days (to avoid losing your work if your computer is damaged). Otherwise, follow these instructions to locally rebase your feature branch into develop and push those rebased changes online.

+

Starting your feature branch

+
    +
  1. Pull the latest develop +
    git checkout develop
    +git pull
    +
  2. +
  3. Create your feature branch +
    git checkout -b feature/feature1
    +
  4. +
  5. Add, modify or delete the necessary files to add your new feature
  6. +
  7. Update the change log (docs/change-log.md)
  8. +
  9. Stage and commit your changes using VS Code git GUI or the following commands +
    git add modified-file1 modified-file2
    +git commit -m "Add my new feature" # use a concise description
    +
  10. +
+

Merging back your feature branch

+

If your changes took time to be implemented it is possible that there are new commits in our develop branch, so we need to rebase your feature branch.

+
    +
  1. +

    Fetch the latest changes to develop +

    git fetch origin develop
    +

    +
  2. +
  3. +

    Rebase your feature branch +

    git checkout feature/feature1
    +git rebase -i develop
    +

    +
  4. +
  5. +

    Integrate your new feature to develop +

    git checkout develop
    +git merge --no-ff feature/feature1 # (use the default merge message)
    +git push origin develop
    +git branch -d feature/feature1
    +

    +
  6. +
+
+
+

You are an external developer if you do NOT have writing permissions to the repository.

+

Starting your feature branch

+
    +
  1. Fork and clone our repository on Github
  2. +
  3. Switch to the latest develop +
    git checkout develop
    +
  4. +
  5. Create your feature branch +
    git checkout -b feature/external-test
    +
  6. +
  7. Add, modify or delete the necessary files to add your new feature
  8. +
  9. Stage and commit your changes using VS Code git GUI or the following commands +
    git add modified-file1 modified-file2
    +git commit -m "Add my new feature" # use a concise description
    +
  10. +
+

Merging back your feature branch

+

If your changes took time to be implemented, it is possible that there are new commits in our develop branch, so we need to rebase your feature branch.

+
    +
  1. +

    Add our repo as another remote +

    git remote add upstream https://github.com/carissalow/rapids/
    +

    +
  2. +
  3. +

    Fetch the latest changes to develop +

    git fetch upstream develop 
    +

    +
  4. +
  5. +

    Rebase your feature branch +

    git checkout feature/external-test
    +git rebase -i develop
    +

    +
  6. +
  7. +

    Push your feature branch online +

    git push --set-upstream origin feature/external-test
    +

    +
  8. +
  9. +

    Open a pull request to the develop branch using Github’s GUI

    +
  10. +
+
+
+

Release a New Version

+
    +
  1. Pull the latest develop +
    git checkout develop
    +git pull
    +
  2. +
  3. Create a new release branch +
    git describe --abbrev=0 --tags # Bump the release (0.1.0 to 0.2.0 => NEW_HOTFIX)
    +git checkout -b release/v[NEW_RELEASE] develop
    +
  4. +
  5. Add new tag +
    git tag v[NEW_RELEASE]
    +
  6. +
  7. Merge and push the release branch +
    git checkout develop
    +git merge release/v[NEW_RELEASE]
    +git push --tags origin develop
    +git branch -d release/v[NEW_RELEASE]
    +
  8. +
  9. Fast-forward master +
    git checkout master
    +git merge --ff-only develop
    +git push
    +
  10. +
  11. Go to GitHub and create a new release based on the newest tag v[NEW_RELEASE] (remember to add the change log)
  12. +
+

Release a Hotfix

+
    +
  1. Pull the latest master +
    git checkout master
    +git pull
    +
  2. +
  3. Start a hotfix branch +
    git describe --abbrev=0 --tags # Bump the hotfix (0.1.0 to 0.1.1 => NEW_HOTFIX)
    +git checkout -b hotfix/v[NEW_HOTFIX] master
    +
  4. +
  5. Fix whatever needs to be fixed
  6. +
  7. Update the change log
  8. +
  9. Tag and merge the hotfix +
    git tag v[NEW_HOTFIX]
    +git checkout develop
    +git merge hotfix/v[NEW_HOTFIX]
    +git push --tags origin develop
    +git branch -d hotfix/v[NEW_HOTFIX]
    +
  10. +
  11. Fast-forward master +
    git checkout master
    +git merge --ff-only v[NEW_HOTFIX]
    +git push
    +
  12. +
  13. Go to GitHub and create a new release based on the newest tag v[NEW_HOTFIX] (remember to add the change log)
  14. +
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/developers/remote-support/index.html b/1.3/developers/remote-support/index.html new file mode 100644 index 00000000..3b2247ca --- /dev/null +++ b/1.3/developers/remote-support/index.html @@ -0,0 +1,1897 @@ + + + + + + + + + + + + + + + + + + + + + + Remote Support - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Remote Support

+

We use the Live Share extension of Visual Studio Code to debug bugs when sharing data or database credentials is not possible.

+
    +
  1. Install Visual Studio Code
  2. +
  3. Open your RAPIDS root folder in a new VSCode window
  4. +
  5. Open a new terminal in Visual Studio Code Terminal > New terminal
  6. +
  7. Install the Live Share extension pack
  8. +
  9. +

    Press Ctrl+P or Cmd+P and run this command:

    +
    >live share: start collaboration session
    +
    +
  10. +
  11. +

    Follow the instructions and share the session link you receive

    +
  12. +
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/developers/test-cases/index.html b/1.3/developers/test-cases/index.html new file mode 100644 index 00000000..61231d0f --- /dev/null +++ b/1.3/developers/test-cases/index.html @@ -0,0 +1,2736 @@ + + + + + + + + + + + + + + + + + + + + + + Test cases - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Test Cases

+

Along with the continued development and the addition of new sensors and features to the RAPIDS pipeline, tests for the currently available sensors and features are being implemented. Since this is a Work In Progress this page will be updated with the list of sensors and features for which testing is available. For each of the sensors listed, a description of the data used for testing (test cases) is outlined. Currently for all intent and testing purposes the tests/data/raw/test01/ contains all the test data files for testing android data formats and tests/data/raw/test02/ contains all the test data files for testing iOS data formats. It follows that the expected (verified output) are contained in the tests/data/processed/test01/ and tests/data/processed/test02/ for Android and iOS respectively. tests/data/raw/test03/ and tests/data/raw/test04/ contain data files for testing empty raw data files for android and iOS respectively.

+

The following is a list of the sensors that testing is currently available.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SensorProviderPeriodicFrequencyEvent
Phone AccelerometerPandaNNN
Phone AccelerometerRAPIDSNNN
Phone Activity RecognitionRAPIDSNNN
Phone Applications ForegroundRAPIDSNNN
Phone BatteryRAPIDSYYN
Phone BluetoothDoryabNNN
Phone BluetoothRAPIDSYYY
Phone CallsRAPIDSYYN
Phone ConversationRAPIDSYYN
Phone Data YieldRAPIDSNNN
Phone LightRAPIDSYYN
Phone LocationsDoryabNNN
Phone LocationsBarnettNNN
Phone MessagesRAPIDSYYN
Phone ScreenRAPIDSYNN
Phone WiFi ConnectedRAPIDSYYN
Phone WiFi VisibleRAPIDSYYN
Fitbit Calories IntradayRAPIDSYYY
Fitbit Data YieldRAPIDSNNN
Fitbit Heart Rate SummaryRAPIDSNNN
Fitbit Heart Rate IntradayRAPIDSNNN
Fitbit Sleep SummaryRAPIDSNNN
Fitbit Sleep IntradayRAPIDSYYY
Fitbit Sleep IntradayPRICEYYY
Fitbit Steps SummaryRAPIDSNNN
Fitbit Steps IntradayRAPIDSNNN
+

Messages (SMS)

+
    +
  • The raw message data file contains data for 2 separate days.
  • +
  3. The data for the first day contains 5 records for every + epoch.
  • +
  • The second day's data contains 6 records for each of only 2 + epoch (currently morning and evening)
  • +
  7. The raw message data contains records for both message_types + (i.e. received and sent) in both days in all epochs. The + number of records with each message_types per epoch is randomly + distributed. There is at least one record with each + message_types per epoch.
  • +
  • There is one raw message data file each, as described above, for + testing both iOS and Android data.
  • +
  • There is also an additional empty data file for both android and + iOS for testing empty data files
  • +
+

Calls

+

Due to the difference in the format of the raw call data for iOS and Android, the following are the expected results for the calls_with_datetime_unified.csv. This would give a better idea of the use cases being tested since the calls_with_datetime_unified.csv would make both the iOS and Android data comparable.

+
    +
  • The call data would contain data for 2 days.
  • +
  • The data for the first day contains 6 records for every epoch.
  • +
  • The second day's data contains 6 records for each of only 2 + epoch (currently morning and evening)
  • +
  • The call data contains records for all call_types (i.e. + incoming, outgoing and missed) in both days in all epochs. + The number records with each of the call_types per epoch is + randomly distributed. There is at least one records with each + call_types per epoch.
  • +
  • There is one call data file each, as described above, for testing + both iOS and Android data.
  • +
  • There is also an additional empty data file for both android and + iOS for testing empty data files
  • +
+

Screen

+

Due to the difference in the format of the raw screen data for iOS and Android, the following are the expected results for the screen_deltas.csv. This would give a better idea of the use cases being tested since the screen_deltas.csv would make both the iOS and Android data comparable. These files are used to calculate the features for the screen sensor.

+
    +
  • The screen delta data file contains data for 1 day.
  • +
  • The screen delta data contains 1 record to represent an unlock + episode that falls within an epoch for every epoch.
  • +
  • The screen delta data contains 1 record to represent an unlock + episode that falls across the boundary of 2 epochs. Namely the + unlock episode starts in one epoch and ends in the next, thus + there is a record for unlock episodes that fall across night + to morning, morning to afternoon and finally afternoon to + night
  • +
  • The testing is done for unlock episode_type.
  • +
  • There is one screen data file each for testing both iOS and + Android data formats.
  • +
  • There is also an additional empty data file for both android and + iOS for testing empty data files
  • +
+

Battery

+

Due to the difference in the format of the raw battery data for iOS and Android as well as versions of iOS the following is the expected results the battery_deltas.csv. This would give a better idea of the use cases being tested since the battery_deltas.csv would make both the iOS and Android data comparable. These files are used to calculate the features for the battery sensor.

+
    +
  • The battery delta data file contains data for 1 day.
  • +
  • The battery delta data contains 1 record each for a charging and + discharging episode that falls within an epoch for every + epoch. Thus, for the daily epoch there would be multiple + charging and discharging episodes
  • +
  • Since either a charging episode or a discharging episode and + not both can occur across epochs, in order to test episodes that + occur across epochs alternating episodes of charging and + discharging episodes that fall across night to morning, + morning to afternoon and finally afternoon to night are + present in the battery delta data. This starts with a + discharging episode that begins in night and end in morning.
  • +
  • There is one battery data file each, for testing both iOS and + Android data formats.
  • +
  • There is also an additional empty data file for both android and + iOS for testing empty data files
  • +
+

Bluetooth

+
    +
  • The raw Bluetooth data file contains data for 1 day.
  • +
  • The raw Bluetooth data contains at least 2 records for each + epoch. Each epoch has a record with a timestamp for the + beginning boundary for that epoch and a record with a + timestamp for the ending boundary for that epoch. (e.g. For + the morning epoch there is a record with a timestamp for + 6:00AM and another record with a timestamp for 11:59:59AM. + These are to test edge cases)
  • +
  • An option of 5 Bluetooth devices are randomly distributed + throughout the data records.
  • +
  • There is one raw Bluetooth data file each, for testing both iOS + and Android data formats.
  • +
  • There is also an additional empty data file for both android and + iOS for testing empty data files.
  • +
+

WIFI

+
    +
  • There are 2 data files (wifi_raw.csv and sensor_wifi_raw.csv) + for each fake participant for each phone platform.
  • +
  • The raw WIFI data files contain data for 1 day.
  • +
  • The sensor_wifi_raw.csv data contains at least 2 records for + each epoch. Each epoch has a record with a timestamp for the + beginning boundary for that epoch and a record with a + timestamp for the ending boundary for that epoch. (e.g. For + the morning epoch there is a record with a timestamp for + 6:00AM and another record with a timestamp for 11:59:59AM. + These are to test edge cases)
  • +
  • The wifi_raw.csv data contains 3 records with random timestamps + for each epoch to represent visible broadcasting WIFI network. + This file is empty for the iOS phone testing data.
  • +
  • An option of 10 access point devices is randomly distributed + throughout the data records. 5 each for sensor_wifi_raw.csv and + wifi_raw.csv.
  • +
  • There are data files for testing both iOS and Android data formats.
  • +
  • There are also additional empty data files for both android and + iOS for testing empty data files.
  • +
+

Light

+
    +
  • The raw light data file contains data for 1 day.
  • +
  • The raw light data contains 3 or 4 rows of data for each epoch + except night. The single row of data for night is for testing + features for single values inputs. (Example testing the standard + deviation of one input value)
  • +
  • Since light is only available for Android there is only one file + that contains data for Android. All other files (i.e. for iPhone) + are empty data files.
  • +
+

Locations

+

Description

+
    +
  • The participant’s home location is (latitude=1, longitude=1).
  • +
  • From Sat 10:56:00 to Sat 11:04:00, the center of the cluster is (latitude=-100, longitude=-100).
  • +
  • From Sun 03:30:00 to Sun 03:47:00, the center of the cluster is (latitude=1, longitude=1). Home location is extracted from this period.
  • +
  • From Sun 11:30:00 to Sun 11:38:00, the center of the cluster is (latitude=100, longitude=100).
  • +
+

Application Foreground

+
    +
  • The raw application foreground data file contains data for 1 day.
  • +
  • The raw application foreground data contains 7 - 9 rows of data + for each epoch. The records for each epoch contains apps that + are randomly selected from a list of apps that are from the + MULTIPLE_CATEGORIES and SINGLE_CATEGORIES (See + testing_config.yaml). There are also records in each epoch + that have apps randomly selected from a list of apps that are from + the EXCLUDED_CATEGORIES and EXCLUDED_APPS. This is to test + that these apps are actually being excluded from the calculations + of features. There are also records to test SINGLE_APPS + calculations.
  • +
  • Since application foreground is only available for Android there + is only one file that contains data for Android. All other files + (i.e. for iPhone) are empty data files.
  • +
+

Activity Recognition

+
    +
  • The raw Activity Recognition data file contains data for 1 day.
  • +
  • The raw Activity Recognition data each epoch period contains + rows that record 2 - 5 different activity_types. This is so + that durations of activities can be tested. Additionally, there + are records that mimic the duration of an activity over the time + boundary of neighboring epochs. (For example, there is a set of + records that mimic the participant in_vehicle from afternoon + into evening)
  • +
  • There is one file each with raw Activity Recognition data for + testing both iOS and Android data formats. + (plugin_google_activity_recognition_raw.csv for android and + plugin_ios_activity_recognition_raw.csv for iOS)
  • +
  • There is also an additional empty data file for both android and + iOS for testing empty data files.
  • +
+

Conversation

+
    +
  • The raw conversation data file contains data for 2 days.
  • +
  • The raw conversation data contains records with a sample of both + datatypes (i.e. voice/noise = 0, and conversation = 2 ) + as well as rows with samples of each of the inference values + (i.e. silence = 0, noise = 1, voice = 2, and unknown + = 3) for each epoch. The different datatype and inference + records are randomly distributed throughout the epoch.
  • +
  • Additionally there are 2 - 5 records for conversations (datatype + = 2, and inference = -1) in each epoch and for each epoch + except night, there is a conversation record that has a + double_convo_start timestamp that is from the previous + epoch. This is to test the calculations of features across + epochs.
  • +
  • There is a raw conversation data file for both android and iOS + platforms (plugin_studentlife_audio_android_raw.csv and + plugin_studentlife_audio_raw.csv respectively).
  • +
  • Finally, there are also additional empty data files for both + android and iOS for testing empty data files
  • +
+

Keyboard

+
    +
  • The raw keyboard data file contains data for 4 days.
  • +
  • +

    The raw keyboard data contains records with difference in timestamp ranging from + milliseconds to seconds.

    +
  • +
  • +

    With difference in timestamps between consecutive records more than 5 seconds helps us to create separate + sessions within the usage of the same app. This helps to verify the case where sessions have to be different.

    +
  • +
  • +

    The raw keyboard data contains records where the difference in timestamps is less + than 5 seconds, which would make them a single session, but because the app differs a + new session starts. This edge case determines the behaviour within a particular app + and also within 5 seconds.

    +
  • +
  • +

    The raw keyboard data also contains the records where length of current_text varies between consecutive rows. This helps us to tests on the cases where input text is entered by auto-suggested + or auto-correct operations.

    +
  • +
  • +

    One three-minute episode with a 1-minute row on Sun 08:59:54.65 and 09:00:00, another on Sun 12:01:02 that are considered a single episode in multi-timezone event segments to showcase how + inferring time zone data for Keyboard from phone data can produce inaccurate results around the tz change. This happens because the device was on LA time until 11:59 and switched to NY time at 12pm, in terms of actual time 09 am LA and 12 pm NY represent the same moment in time so 09:00 LA and 12:01 NY are consecutive minutes.

    +
  • +
+

Fitbit Calories Intraday

+

Description

+
    +
  • A five-minute sedentary episode on Fri 11:00:00
  • +
  • A one-minute sedentary episode on Sun 02:00:00. It exists in November but not in February in STZ
  • +
  • A five-minute sedentary episode on Fri 11:58:00. It is split within two 30-min segments and the morning
  • +
  • A three-minute lightly active episode on Fri 11:10:00, a one-minute at 11:18:00 and a one-minute 11:24:00. These check for start and end times of first/last/longest episode
  • +
  • A three-minute fairly active episode on Fri 11:40:00, a one-minute at 11:48:00 and a one-minute 11:54:00. These check for start and end times of first/last/longest episode
  • +
  • A three-minute very active episode on Fri 12:10:00, a one-minute at 12:18:00 and a one-minute 12:24:00. These check for start and end times of first/last/longest episode
  • +
  • An eight-minute MVPA episode with intertwined fairly and very active rows on Fri 12:30:00
  • +
  • The above episodes contain six highmet (>= 3 MET) episodes and nine lowmet episodes.
  • +
  • One two-minute sedentary episode with a 1-minute row on Sun 09:00:00 and another on Sun 12:01:01 that are considered a single episode in multi-timezone event segments to showcase how inferring time zone data for Fitbit from phone data can produce inaccurate results around the tz change. This happens because the device was on LA time until 11:59 and switched to NY time at 12pm, in terms of actual time 09 am LA and 12 pm NY represent the same moment in time so 09:00 LA and 12:01 NY are consecutive minutes.
  • +
  • A three-minute sedentary episode on Sat 08:59 that will be ignored for multi-timezone event segments.
  • +
  • A three-minute sedentary episode on Sat 12:59 of which the first minute will be ignored for multi-timezone event segments since the test segment starts at 13:00
  • +
  • A three-minute sedentary episode on Sat 16:00
  • +
  • A four-minute sedentary episode on Sun 10:01 that will be ignored for November's multi-timezone event segments since the test segment ends at 10am on that weekend.
  • +
  • A three-minute very active episode on Sat 16:03. This episode and the one at 16:00 are counted as one for lowmet episodes
  • +
+

Checklist

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
time segmentsingle tzmulti tzplatform
30minOKOKfitbit
morningOKOKfitbit
dailyOKOKfitbit
threedayOKOKfitbit
weekendOKOKfitbit
beforeMarchEventOKOKfitbit
beforeNovemberEventOKOKfitbit
+

Fitbit Sleep Summary

+

Description

+
    +
  • A main sleep episode that starts on Fri 20:00:00 and ends on Sat 02:00:00. This episode starts after 11am (Last Night End) which will be considered as today’s (Fri) data.
  • +
  • A nap that starts on Sat 04:00:00 and ends on Sat 06:00:00. This episode starts before 11am (Last Night End) which will be considered as yesterday’s (Fri) data.
  • +
  • A nap that starts on Sat 13:00:00 and ends on Sat 15:00:00. This episode starts after 11am (Last Night End) which will be considered as today’s (Sat) data.
  • +
  • A main sleep that starts on Sun 01:00:00 and ends on Sun 12:00:00. This episode starts before 11am (Last Night End) which will be considered as yesterday’s (Sat) data.
  • +
  • A main sleep that starts on Sun 23:00:00 and ends on Mon 07:00:00. This episode starts after 11am (Last Night End) which will be considered as today’s (Sun) data.
  • +
  • Any segment shorter than one day will be ignored for sleep RAPIDS features.
  • +
+

Checklist

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
time segmentsingle tzmulti tzplatform
30minOKOKfitbit
morningOKOKfitbit
dailyOKOKfitbit
threedayOKOKfitbit
weekendOKOKfitbit
beforeMarchEventOKOKfitbit
beforeNovemberEventOKOKfitbit
+

Fitbit Sleep Intraday

+

Description

+
    +
  • A five-minute main sleep episode with asleep-classic level on Fri 11:00:00.
  • +
  • An eight-hour main sleep episode on Fri 17:00:00. It is split into 2 parts for daily segment: a seven-hour sleep episode on Fri 17:00:00 and a one-hour sleep episode on Sat 00:00:00.
  • +
  • A two-hour nap on Sat 01:00:00 that will be ignored for main sleep features.
  • +
  • A one-hour nap on Sat 13:00:00 that will be ignored for main sleep features.
  • +
  • An eight-hour main sleep episode on Sat 22:00:00. This episode ends on Sun 08:00:00 (NY) for March and Sun 06:00:00 (NY) for November due to daylight savings. It will be considered for beforeMarchEvent segment and ignored for beforeNovemberEvent segment.
  • +
  • A nine-hour main sleep episode on Sun 11:00:00. Start time will be assigned as NY time zone and converted to 14:00:00.
  • +
  • A seven-hour main sleep episode on Mon 06:00:00. This episode will be split into two parts: a five-hour sleep episode on Mon 06:00:00 and a two-hour sleep episode on Mon 11:00:00. The first part will be discarded as it is before 11am (Last Night End)
  • +
  • Any segment shorter than one day will be ignored for sleep PRICE features.
  • +
+

Checklist

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
time segmentsingle tzmulti tzplatform
30minOKOKfitbit
morningOKOKfitbit
dailyOKOKfitbit
threedayOKOKfitbit
weekendOKOKfitbit
beforeMarchEventOKOKfitbit
beforeNovemberEventOKOKfitbit
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/developers/testing/index.html b/1.3/developers/testing/index.html new file mode 100644 index 00000000..3453da22 --- /dev/null +++ b/1.3/developers/testing/index.html @@ -0,0 +1,2136 @@ + + + + + + + + + + + + + + + + + + + + + + Testing - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Testing

+

The following is a simple guide to run RAPIDS’ tests. All files necessary for testing are stored in the ./tests/ directory

+

Steps for Testing

+
Testing Overview
    +
  1. You have to create a single four day test dataset for the sensor you are working on.
  2. +
  3. You will adjust your dataset with tests/scripts/assign_test_timestamps.py to fit Fri March 6th 2020 - Mon March 9th 2020 and Fri Oct 30th 2020 - Mon Nov 2nd 2020. We test daylight saving times with these dates.
  4. +
  5. We have one test participant per platform (pids: android, ios, fitbit, empatica, empty). The data device_id should be equal to the pid.
  6. +
  7. We will run this test dataset against six test pipelines, three for frequency, periodic, and event time segments in a single time zone, and the same three in multiple time zones.
  8. +
  9. You will have to create your test data to cover as many corner cases as possible. These cases depend on the sensor you are working on.
  10. +
  11. The time segments and time zones to be tested are:
  12. +
+
Frequency
    +
  • 30 minutes (30min,30)
  • +
+
+
Periodic
    +
  • morning (morning,06:00:00,5H 59M 59S,every_day,0)
  • +
  • daily (daily,00:00:00,23H 59M 59S,every_day,0)
  • +
  • three-day segments that repeat every day (threeday,00:00:00,71H 59M 59S,every_day,0)
  • +
  • three-day segments that repeat every Friday (weekend,00:00:00,71H 59M 59S,wday,5)
  • +
+
+
Event
    +
  • A segment that starts 3 hours before an event (Sat Mar 07 2020 19:00:00 EST) and lasts for 22 hours. Note that the last part of this segment will happen during a daylight saving change on Sunday at 2am when the clock moves forward and the period 2am-3am does not exist. In this case, the segment would start on Sat Mar 07 2020 16:00:00 EST (timestamp: 1583614800000) and end on Sun Mar 08 2020 15:00:00 EST (timestamp: 1583694000000). (beforeMarchEvent,1583625600000,22H,3H,-1,android)
  • +
  • A segment that starts 3 hours before an event (Sat Oct 31 2020 19:00:00 EST) and lasts for 22 hours. Note that the last part of this segment will happen during a daylight saving change on Sunday at 2am when the clock moves back and the period 1am-2am exists twice. In this case, the segment would start on Sat Oct 31 2020 16:00:00 EST (timestamp: 1604174400000) and end on Sun Nov 01 2020 13:00:00 EST (timestamp: 1604253600000). (beforeNovemberEvent,1604185200000,22H,3H,-1,android)
  • +
+
+
Single time zone to test

America/New_York

+
+
Multi time zones to test
    +
  • America/New_York starting at 0
  • +
  • America/Los_Angeles starting at 1583600400000 (Sat Mar 07 2020 12:00:00 EST)
  • +
  • America/New_York starting at 1583683200000 (Sun Mar 08 2020 12:00:00 EST)
  • +
  • America/Los_Angeles starting at 1604160000000 (Sat Oct 31 2020 12:00:00 EST)
  • +
  • America/New_York starting at 1604250000000 (Sun Nov 01 2020 12:00:00 EST)
  • +
+
+
Understanding event segments with multi timezones

+ +

+
+
+
Document your tests
    +
  • Before you start implementing any test data you need to document your tests.
  • +
  • The documentation of your tests should be added to docs/developers/test-cases.md under the corresponding sensor.
  • +
  • You will need to add two subsections Description and the Checklist
  • +
  • The amount of data you need depends on each sensor but you can be efficient by creating data that covers corner cases in more than one time segment. For example, a battery episode from 11am to 1pm, covers the case when an episode has to be split for 30min frequency segments and for morning segments.
  • +
  • As a rule of thumb think about corner cases for 30min segments as they will give you the most flexibility.
  • +
  • Only add tests for iOS if the raw data format is different than Android’s (for example for screen)
  • +
  • Create specific tests for Sunday before and after 02:00. These will test daylight saving switches, in March 02:00 to 02:59 do not exist, and in November 01:00 to 01:59 exist twice (read below how tests/scripts/assign_test_timestamps.py handles this)
  • +
+
Example of Description

Description is a list and every item describes the different scenarios your test data is covering. For example, if we are testing PHONE_BATTERY:

+
- We test 24 discharge episodes, 24 charge episodes and 2 episodes with a 0 discharge rate
+- One episode is shorter than 30 minutes (`start timestamp` to `end timestamp`)
+- One episode is 120 minutes long from 11:00 to 13:00 (`start timestamp` to `end timestamp`). This one covers the case when an episode has to be chunked for 30min frequency segments and for morning segments
+- One episode is 60 minutes long from 23:30 to 00:30 (`start timestamp` to `end timestamp`). This one covers the case when an episode has to be chunked for 30min frequency segments and for daily segments (overnight)
+- One 0 discharge rate episode 10 minutes long that happens within a 30-minute segment (10:00 to 10:29) (`start timestamp` to `end timestamp`)
+- Three discharge episodes that happen during beforeMarchEvent (start/end timestamps of those discharge episodes)
+- Three charge episodes that happen during beforeMarchEvent (start/end timestamps of those charge episodes)
+- One discharge episode that happens between 00:30 and 04:00 to test for daylight saving times in March and November 2020.
+- ... any other test corner cases you can think of
+
+

Describe your test cases in as much detail as possible so in the future if we find a bug in RAPIDS, we know what test case we did not include and should add.

+
+
Example of Checklist

Checklist is a table where you confirm you have verified the output of your dataset for the different time segments and time zones

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
time segmentsingle tzmulti tzplatform
30minOKOKandroid and iOS
morningOKOKandroid and iOS
dailyOKOKandroid and iOS
threedayOKOKandroid and iOS
weekendOKOKandroid and iOS
beforeMarchEventOKOKandroid and iOS
beforeNovemberEventOKOKandroid and iOS
+
+
+
Add raw input data.
    +
  1. Add the raw test data to the corresponding sensor CSV file in tests/data/manual/aware_csv/SENSOR_raw.csv. Create the CSV if it does not exist.
  2. +
  3. The test data you create will have the same columns as normal raw data except test_time replaces timestamp. To make your life easier, you can place a test data row in time using the test_time column with the following format: Day HH:MM:SS.XXX, for example Fri 22:54:30.597.
  4. +
  5. +

    You can convert your manual test data to actual raw test data with the following commands:

    +
      +
    • +

      For the selected files: (It could be a single file name or multiple file names separated by whitespace(s)) +

      python tests/scripts/assign_test_timestamps.py -f file_name_1 file_name_2
      +

      +
    • +
    • +

      For all files under the tests/data/manual/aware_csv folder: +

      python tests/scripts/assign_test_timestamps.py -a
      +

      +
    • +
    +
  6. +
  7. +

    The script assign_test_timestamps.py converts your test_time column into a timestamp. For example, Fri 22:54:30.597 is converted to 1583553270597 (Fri Mar 06 2020 22:54:30 GMT-0500) and to 1604112870597 (Fri Oct 30 2020 22:54:30 GMT-0400). Note you can include milliseconds.

    +
  8. +
  9. The device_id should be the same as pid.
  10. +
+
Example of test data you need to create

The test_time column will be automatically converted to a timestamp that fits our testing periods in March and November by tests/scripts/assign_test_timestamps.py

+
test_time,device_id,battery_level,battery_scale,battery_status
+Fri 01:00:00.000,ios,90,100,4
+Fri 01:00:30.500,ios,89,100,4
+Fri 01:01:00.000,ios,80,100,4
+Fri 01:01:45.500,ios,79,100,4
+...
+Sat 08:00:00.000,ios,78,100,4
+Sat 08:01:00.000,ios,50,100,4
+Sat 08:02:00.000,ios,49,100,4
+
+
+
+
Add expected output data.
    +
  1. Add or update the expected output feature file of the participant and sensor you are testing: +
    tests/data/processed/features/{type_of_time_segment}/{pid}/device_sensor.csv 
    +
    +# this example is expected output data for battery tests for periodic segments in a single timezone
    +tests/data/processed/features/stz_periodic/android/phone_sensor.csv 
    +
    +# this example is expected output data for battery tests for periodic segments in multi timezones
    +tests/data/processed/features/mtz_periodic/android/phone_sensor.csv 
    +
  2. +
+
+
Edit the config file(s).
    +
  1. Activate the sensor provider you are testing if it isn’t already. Set [SENSOR][PROVIDER][COMPUTE] to TRUE in the config.yaml of the time segments and time zones you are testing: +
    - tests/settings/stz_frequency_config.yaml # For single-timezone frequency time segments
    +- tests/settings/stz_periodic_config.yaml # For single-timezone periodic time segments
    +- tests/settings/stz_event_config.yaml # For single-timezone event time segments
    +
    +- tests/settings/mtz_frequency_config.yaml # For multi-timezone frequency time segments
    +- tests/settings/mtz_periodic_config.yaml # For multi-timezone periodic time segments
    +- tests/settings/mtz_event_config.yaml # For multi-timezone event time segments
    +
  2. +
+
+
Run the pipeline and tests.
    +
  1. You can run all six segment pipelines and their tests +
    bash tests/scripts/run_tests.sh -t all
    +
  2. +
  3. You can run only the pipeline of a specific time segment and its tests +
    bash tests/scripts/run_tests.sh -t stz_frequency -a both # swap stz_frequency for mtz_frequency, stz_event, mtz_event, etc
    +
  4. +
  5. Or, if you are working on your tests and you want to run a pipeline and its tests independently +
    bash tests/scripts/run_tests.sh -t stz_frequency -a run
    +bash tests/scripts/run_tests.sh -t stz_frequency -a test
    +
  6. +
+
How does the test execution work?

This bash script tests/scripts/run_tests.sh executes one or all test pipelines for different time segment types (frequency, periodic, and events) and single or multiple timezones.

+

The python script tests/scripts/run_tests.py runs the tests. It parses the involved participants and active sensor providers in the config.yaml file of the time segment type and time zone being tested. We test that the output file we expect exists and that its content matches the expected values.

+
+
Output Example

The following is a snippet of the output you should see after running your test.

+
test_sensors_files_exist (test_sensor_features.TestSensorFeatures) ... stz_periodic
+ok
+test_sensors_features_calculations (test_sensor_features.TestSensorFeatures) ... stz_periodic
+ok
+
+test_sensors_files_exist (test_sensor_features.TestSensorFeatures) ... stz_frequency
+ok
+test_sensors_features_calculations (test_sensor_features.TestSensorFeatures) ... stz_frequency
+FAIL
+
+

The results above show that for stz_periodic, both test_sensors_files_exist and test_sensors_features_calculations passed. While for stz_frequency, the first test test_sensors_files_exist passed while test_sensors_features_calculations failed. Additionally, you should get the traceback of the failure (not shown here).

+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/developers/validation-schema-config/index.html b/1.3/developers/validation-schema-config/index.html new file mode 100644 index 00000000..0c3259f2 --- /dev/null +++ b/1.3/developers/validation-schema-config/index.html @@ -0,0 +1,2171 @@ + + + + + + + + + + + + + + + + + + + + + + Validation schema of config.yaml - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Validation schema of config.yaml

+
+

Why do we need to validate the config.yaml?

+

Most of the key/values in the config.yaml are constrained to a set of possible values or types. For example [TIME_SEGMENTS][TYPE] can only be one of ["FREQUENCY", "PERIODIC", "EVENT"], and [TIMEZONE] has to be a string.

+

We should show the user an error if that’s not the case. We could validate this in Python or R but since we reuse scripts and keys in multiple places, tracking these validations can be time consuming and get out of control. Thus, we do these validations through a schema and check that schema before RAPIDS starts processing any data so the user can see the error right away.

+

Keep in mind these validations can only cover certain base cases. Some validations that require more complex logic should still be done in the respective script. For example, we can check that a CSV file path actually ends in .csv but we can only check that the file actually exists in a Python script.

+
+

The structure and values of the config.yaml file are validated using a YAML schema stored in tools/config.schema.yaml. Each key in config.yaml, for example PIDS, has a corresponding entry in the schema where we can validate its type, possible values, required properties, min and max values, among other things.

+

The config.yaml is validated against the schema every time RAPIDS runs (see the top of the Snakefile):

+
validate(config, "tools/config.schema.yaml")
+
+

Structure of the schema

+

The schema has three main sections required, definitions, and properties. All of them are just nested key/value YAML pairs, where the value can be a primitive type (integer, string, boolean, number) or can be another key/value pair (object).

+

required

+

required lists properties that should be present in the config.yaml. We will almost always add every config.yaml key to this list (meaning that the user cannot delete any of those keys like TIMEZONE or PIDS).

+

definitions

+

definitions lists key/values that are common to different properties so we can reuse them. You can define a key/value under definitions and use $ref to refer to it in any property.

+

For example, every sensor like [PHONE_ACCELEROMETER] has one or more providers like RAPIDS and PANDA, these providers have some common properties like the COMPUTE flag or the SRC_SCRIPT string. Therefore we define a shared provider “template” that is used by every provider and extended with properties exclusive to each one of them. For example:

+
+

The PROVIDER definition will be used later on different properties.

+
PROVIDER:
+    type: object
+    required: [COMPUTE, SRC_SCRIPT, FEATURES]
+    properties:
+    COMPUTE:
+        type: boolean
+    FEATURES:
+        type: [array, object]
+    SRC_SCRIPT:
+        type: string
+        pattern: "^.*\\.(py|R)$"
+
+
+
+

Notice that RAPIDS (a provider) uses and extends the PROVIDER template in this example. The FEATURES key is overriding the FEATURES key from the #/definitions/PROVIDER template but is keeping the validation for COMPUTE, and SRC_SCRIPT. For more details about reusing properties, go to this link

+
PHONE_ACCELEROMETER:
+    type: object
+     # .. other properties
+    PROVIDERS:
+        type: ["null", object]
+        properties:
+        RAPIDS:
+            allOf:
+            - $ref: "#/definitions/PROVIDER"
+            - properties:
+                FEATURES: 
+                    type: array
+                    uniqueItems: True
+                    items:
+                    type: string
+                    enum: ["maxmagnitude", "minmagnitude", "avgmagnitude", "medianmagnitude", "stdmagnitude"]
+
+
+
+

properties

+

properties are nested key/values that describe the different components of our config.yaml file. Values can be of one or more primitive types like string, number, array, boolean and null. Values can also be another key/value pair (of type object) that are similar to a dictionary in Python.

+

For example, the following property validates the PIDS of our config.yaml. It checks that PIDS is an array with unique items of type string.

+
PIDS:
+    type: array
+    uniqueItems: True
+    items:
+      type: string
+
+

Modifying the schema

+
+

Validating the config.yaml during development

+

If you updated the schema and want to check the config.yaml is compliant, you can run the command snakemake --list-params-changes. You will see Building DAG of jobs... if there are no problems or an error message otherwise (try setting any COMPUTE flag to a string like test instead of False/True).

+

You can use this command without having to configure RAPIDS to process any participants or sensors.

+
+

You can validate different aspects of each key/value in our config.yaml file:

+
+

Including min and max values +

MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS:
+    type: number
+    minimum: 0
+    maximum: 1
+
+FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD:
+    type: integer
+    exclusiveMinimum: 0
+

+
+
+

Including valid values (enum) +

items:
+    type: string
+    enum: ["count", "maxlux", "minlux", "avglux", "medianlux", "stdlux"]
+

+
+
+
MINUTES_DATA_USED:
+    type: boolean
+
+
+
+

Including whether or not it should have unique values, the type of the array’s elements (strings, numbers) and valid values (enum). +

MESSAGES_TYPES:
+    type: array
+    uniqueItems: True
+    items:
+        type: string
+        enum: ["received", "sent"]
+

+
+
+

PARENT is an object that has two properties. KID1 is one of those properties that are, in turn, another object that will reuse the "#/definitions/PROVIDER" definition AND also include (extend) two extra properties GRAND_KID1 of type array and GRAND_KID2 of type number. KID2 is another property of PARENT of type boolean.

+

The schema validation looks like this +

PARENT:
+    type: object
+    properties:
+      KID1:
+        allOf:
+          - $ref: "#/definitions/PROVIDER"
+          - properties:
+              GRAND_KID1:
+                type: array
+                uniqueItems: True
+              GRAND_KID2:
+                type: number
+      KID2:
+        type: boolean
+

+

The config.yaml key that the previous schema validates looks like this: +

PARENT:
+    KID1:
+        # These four come from the `PROVIDER` definition (template)
+        COMPUTE: False
+        FEATURES: [x, y] # an array
+        SRC_SCRIPT: "a path to a py or R script"
+
+        # This two come from the extension
+        GRAND_KID1: [a, b] # an array
+        GRAND_KID2: 5.1 # an number
+     KID2: True # a boolean
+

+
+
+

Verifying the schema is correct

+

We recommend that before you start modifying the schema you modify the config.yaml key that you want to validate with an invalid value. For example, if you want to validate that COMPUTE is boolean, you set COMPUTE: 123. Then create your validation, run snakemake --list-params-changes and make sure your validation fails (123 is not boolean), and then set the key to the correct value. In other words, make sure it’s broken first so that you know that your validation works.

+
+

Warning

+

Be careful. You can check that the schema config.schema.yaml has a valid format by running python tools/check_schema.py. You will see this message if its structure is correct: Schema is OK. However, we don’t have a way to detect typos, for example allOf will work but allOF won’t (capital F) and it won’t show any error. That’s why we recommend to start with an invalid key/value in your config.yaml so that you can be sure the schema validation finds the problem.

+
+

Useful resources

+

Read the following links to learn more about what we can validate with schemas. They are based on JSON instead of YAML schemas but the same concepts apply.

+ + + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/developers/virtual-environments/index.html b/1.3/developers/virtual-environments/index.html new file mode 100644 index 00000000..1db91a43 --- /dev/null +++ b/1.3/developers/virtual-environments/index.html @@ -0,0 +1,2111 @@ + + + + + + + + + + + + + + + + + + + + + + Virtual Environments - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + +
+
+ + + + + + + + +

Virtual Environments

+ +

Python Virtual Environment

+

Add new packages

+

Try to install any new package using conda install -c CHANNEL PACKAGE_NAME (you can use pip if the package is only available there). Make sure your Python virtual environment is active (conda activate YOUR_ENV).

+

Remove packages

+

Uninstall packages using the same manager you used to install them conda remove PACKAGE_NAME or pip uninstall PACKAGE_NAME

+

Updating all packages

+

Make sure your Python virtual environment is active (conda activate YOUR_ENV), then run +

conda update --all
+

+

Update your conda environment.yaml

+

After installing or removing a package you can use the following command in your terminal to update your environment.yaml before publishing your pipeline. Note that we ignore the package version for libfortran and mkl to keep compatibility with Linux: +

conda env export --no-builds | sed 's/^.*libgfortran.*$/  - libgfortran/' | sed 's/^.*mkl=.*$/  - mkl/' >  environment.yml
+

+

R Virtual Environment

+

Add new packages

+
    +
  1. Open your terminal and navigate to RAPIDS’ root folder
  2. +
  3. Run R to open an R interactive session
  4. +
  5. Run renv::install("PACKAGE_NAME")
  6. +
+

Remove packages

+
    +
  1. Open your terminal and navigate to RAPIDS’ root folder
  2. +
  3. Run R to open an R interactive session
  4. +
  5. Run renv::remove("PACKAGE_NAME")
  6. +
+

Updating all packages

+
    +
  1. Open your terminal and navigate to RAPIDS’ root folder
  2. +
  3. Run R to open an R interactive session
  4. +
  5. Run renv::update()
  6. +
+

Update your R renv.lock

+

After installing or removing a package you can use the following command in your terminal to update your renv.lock before publishing your pipeline.

+
    +
  1. Open your terminal and navigate to RAPIDS’ root folder
  2. +
  3. Run R to open an R interactive session
  4. +
  5. Run renv::snapshot() (renv will ask you to confirm any updates to this file)
  6. +
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/add-new-features/index.html b/1.3/features/add-new-features/index.html new file mode 100644 index 00000000..71db97b9 --- /dev/null +++ b/1.3/features/add-new-features/index.html @@ -0,0 +1,2223 @@ + + + + + + + + + + + + + + + + + + + + + + Add New Features - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + +
+
+ + + + + + + + +

Add New Features

+
+

Hint

+
    +
  • We recommend reading the Behavioral Features Introduction before reading this page.
  • +
  • You can implement new features in Python or R scripts.
  • +
  • You won’t have to deal with time zones, dates, times, data cleaning, or preprocessing. The data that RAPIDS pipes to your feature extraction code are ready to process.
  • +
+
+

New Features for Existing Sensors

+

You can add new features to any existing sensors (see list below) by adding a new provider in three steps:

+
    +
  1. Modify the config.yaml file
  2. +
  3. Create your feature provider script
  4. +
  5. Implement your features extraction code
  6. +
+

As a tutorial, we will add a new provider for PHONE_ACCELEROMETER called VEGA that extracts feature1, feature2, feature3 with a Python script that requires a parameter from the user called MY_PARAMETER.

+
Existing Sensors

An existing sensor of any device with a configuration entry in config.yaml:

+

Smartphone (AWARE)

+
    +
  • Phone Accelerometer
  • +
  • Phone Activity Recognition
  • +
  • Phone Applications Crashes
  • +
  • Phone Applications Foreground
  • +
  • Phone Applications Notifications
  • +
  • Phone Battery
  • +
  • Phone Bluetooth
  • +
  • Phone Calls
  • +
  • Phone Conversation
  • +
  • Phone Data Yield
  • +
  • Phone Keyboard
  • +
  • Phone Light
  • +
  • Phone Locations
  • +
  • Phone Log
  • +
  • Phone Messages
  • +
  • Phone Screen
  • +
  • Phone WiFi Connected
  • +
  • Phone WiFi Visible
  • +
+

Fitbit

+
    +
  • Fitbit Data Yield
  • +
  • Fitbit Heart Rate Summary
  • +
  • Fitbit Heart Rate Intraday
  • +
  • Fitbit Sleep Summary
  • +
  • Fitbit Sleep Intraday
  • +
  • Fitbit Steps Summary
  • +
  • Fitbit Steps Intraday
  • +
+

Empatica

+
    +
  • Empatica Accelerometer
  • +
  • Empatica Heart Rate
  • +
  • Empatica Temperature
  • +
  • Empatica Electrodermal Activity
  • +
  • Empatica Blood Volume Pulse
  • +
  • Empatica Inter Beat Interval
  • +
  • Empatica Tags
  • +
+
+

Modify the config.yaml file

+

In this step, you need to add your provider configuration section under the relevant sensor in config.yaml. See our example for our tutorial’s VEGA provider for PHONE_ACCELEROMETER:

+
Example configuration for a new accelerometer provider VEGA
PHONE_ACCELEROMETER:
+    CONTAINER: accelerometer
+    PROVIDERS:
+        RAPIDS: # this is a feature provider
+            COMPUTE: False
+            ...
+
+        PANDA: # this is another feature provider
+            COMPUTE: False
+            ...
+
+        VEGA: # this is our new feature provider
+            COMPUTE: False
+            FEATURES: ["feature1", "feature2", "feature3"]
+            MY_PARAMETER: a_string
+            SRC_SCRIPT: src/features/phone_accelerometer/vega/main.py
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Key                         Description
[COMPUTE]Flag to activate/deactivate your provider
[FEATURES]List of features your provider supports. Your provider code should only return the features on this list
[MY_PARAMETER]An arbitrary parameter that our example provider VEGA needs. This can be a boolean, integer, float, string, or an array of any of such types.
[SRC_SCRIPT]The relative path from RAPIDS’ root folder to a script that computes the features for this provider. It can be implemented in R or Python.
+

Create a feature provider script

+

Create your feature Python or R script called main.py or main.R in the correct folder, src/features/[sensorname]/[providername]/. RAPIDS automatically loads and executes it based on the config key [SRC_SCRIPT] you added in the last step. For our example, this script is: +

src/features/phone_accelerometer/vega/main.py
+

+

Implement your feature extraction code

+

Every feature script (main.[py|R]) needs a [providername]_features function with specific parameters. RAPIDS calls this function with the sensor data ready to process and with other functions and arguments you will need.

+
+
def [providername]_features(sensor_data_files, time_segment, provider, filter_data_by_segment, *args, **kwargs):
+    # empty for now
+    return(your_features_df)
+
+
+
+
[providername]_features <- function(sensor_data, time_segment, provider){
+    # empty for now
+    return(your_features_df)
+}
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter                                      Description
sensor_data_filesPath to the CSV file containing the data of a single participant. This data has been cleaned and preprocessed. Your function will be automatically called for each participant in your study (in the [PIDS] array in config.yaml)
time_segmentThe label of the time segment that should be processed.
providerThe parameters you configured for your provider in config.yaml will be available in this variable as a dictionary in Python or a list in R. In our example, this dictionary contains {MY_PARAMETER:"a_string"}
filter_data_by_segmentPython only. A function that you will use to filter your data. In R, this function is already available in the environment.
*argsPython only. Not used for now
**kwargsPython only. Not used for now
+

The next step is to implement the code that computes your behavioral features in your provider script’s function. As with any other script, this function can call other auxiliary methods, but in general terms, it should have three stages:

+
1. Read a participant’s data by loading the CSV data stored in the file pointed by sensor_data_files
acc_data = pd.read_csv(sensor_data_files["sensor_data"])
+
+

Note that the phone’s battery, screen, and activity recognition data are given as episodes instead of event rows (for example, start and end timestamps of the periods the phone screen was on)

+
+
2. Filter your data to process only those rows that belong to time_segment

This step is only one line of code, but keep reading to understand why we need it. +

acc_data = filter_data_by_segment(acc_data, time_segment)
+

+

You should use the filter_data_by_segment() function to process and group those rows that belong to each of the time segments RAPIDS could be configured with.

+

Let’s understand the filter_data_by_segment() function with an example. A RAPIDS user can extract features on any arbitrary time segment. A time segment is a period that has a label and one or more instances. For example, the user (or you) could have requested features on a daily, weekly, and weekend basis for p01. The labels are arbitrary, and the instances depend on the days a participant was monitored for:

+
    +
  • the daily segment could be named my_days and if p01 was monitored for 14 days, it would have 14 instances
  • +
  • the weekly segment could be named my_weeks and if p01 was monitored for 14 days, it would have 2 instances.
  • +
  • the weekend segment could be named my_weekends and if p01 was monitored for 14 days, it would have 2 instances.
  • +
+

For this example, RAPIDS will call your provider function three times for p01, once where time_segment is my_days, once where time_segment is my_weeks, and once where time_segment is my_weekends. In this example, not every row in p01‘s data needs to take part in the feature computation for either segment and the rows need to be grouped differently.

+

Thus filter_data_by_segment() comes in handy, it will return a data frame that contains the rows that were logged during a time segment plus an extra column called local_segment. This new column will have as many unique values as time segment instances exist (14, 2, and 2 for our p01‘s my_days, my_weeks, and my_weekends examples). After filtering, you should group the data frame by this column and compute any desired features, for example:

+
acc_features["maxmagnitude"] = acc_data.groupby(["local_segment"])["magnitude"].max()
+
+

The reason RAPIDS does not filter the participant’s data set for you is because your code might need to compute something based on a participant’s complete dataset before computing their features. For example, you might want to identify the number that called a participant the most throughout the study before computing a feature with the number of calls the participant received from that number.

+
+
3. Return a data frame with your features

After filtering, grouping your data, and computing your features, your provider function should return a data frame that has:

+
    +
  • One row per time segment instance (e.g., 14 for our p01‘s my_days example)
  • +
  • The local_segment column added by filter_data_by_segment()
  • +
  • One column per feature. The name of your features should only contain letters or numbers (feature1) by convention. RAPIDS automatically adds the correct sensor and provider prefix; in our example, this prefix is phone_accelerometer_vega_.
  • +
+
+
PHONE_ACCELEROMETER Provider Example

For your reference, this is our own provider (RAPIDS) for PHONE_ACCELEROMETER that computes five acceleration features

+
import pandas as pd
+import numpy as np
+
+def rapids_features(sensor_data_files, time_segment, provider, filter_data_by_segment, *args, **kwargs):
+
+    acc_data = pd.read_csv(sensor_data_files["sensor_data"])
+    requested_features = provider["FEATURES"]
+    # name of the features this function can compute
+    base_features_names = ["maxmagnitude", "minmagnitude", "avgmagnitude", "medianmagnitude", "stdmagnitude"]
+    # the subset of requested features this function can compute
+    features_to_compute = list(set(requested_features) & set(base_features_names))
+
+    acc_features = pd.DataFrame(columns=["local_segment"] + features_to_compute)
+    if not acc_data.empty:
+        acc_data = filter_data_by_segment(acc_data, time_segment)
+
+        if not acc_data.empty:
+            acc_features = pd.DataFrame()
+            # get magnitude related features: magnitude = sqrt(x^2+y^2+z^2)
+            magnitude = acc_data.apply(lambda row: np.sqrt(row["double_values_0"] ** 2 + row["double_values_1"] ** 2 + row["double_values_2"] ** 2), axis=1)
+            acc_data = acc_data.assign(magnitude = magnitude.values)
+
+            if "maxmagnitude" in features_to_compute:
+                acc_features["maxmagnitude"] = acc_data.groupby(["local_segment"])["magnitude"].max()
+            if "minmagnitude" in features_to_compute:
+                acc_features["minmagnitude"] = acc_data.groupby(["local_segment"])["magnitude"].min()
+            if "avgmagnitude" in features_to_compute:
+                acc_features["avgmagnitude"] = acc_data.groupby(["local_segment"])["magnitude"].mean()
+            if "medianmagnitude" in features_to_compute:
+                acc_features["medianmagnitude"] = acc_data.groupby(["local_segment"])["magnitude"].median()
+            if "stdmagnitude" in features_to_compute:
+                acc_features["stdmagnitude"] = acc_data.groupby(["local_segment"])["magnitude"].std()
+
+            acc_features = acc_features.reset_index()
+
+    return acc_features
+
+
+

New Features for Non-Existing Sensors

+

If you want to add features for a device or a sensor that we do not support at the moment (those that do not appear in the "Existing Sensors" list above), open a new discussion in Github and we can add the necessary code so you can follow the instructions above.

+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/empatica-accelerometer/index.html b/1.3/features/empatica-accelerometer/index.html new file mode 100644 index 00000000..65d7637a --- /dev/null +++ b/1.3/features/empatica-accelerometer/index.html @@ -0,0 +1,2021 @@ + + + + + + + + + + + + + + + + + + + + + + Empatica Accelerometer - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Empatica Accelerometer

+

Sensor parameters description for [EMPATICA_ACCELEROMETER]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Name of the CSV file containing accelerometer data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.
+

DBDP provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/empatica_accelerometer_raw.csv
+- data/raw/{pid}/empatica_accelerometer_with_datetime.csv
+- data/interim/{pid}/empatica_accelerometer_features/empatica_accelerometer_{language}_{provider_key}.csv
+- data/processed/features/{pid}/empatica_accelerometer.csv
+
+
+

Parameters description for [EMPATICA_ACCELEROMETER][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract EMPATICA_ACCELEROMETER features from the DBDP provider
[FEATURES]Features to be computed, see table below
+

Features description for [EMPATICA_ACCELEROMETER][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxmagnitudem/s2The maximum magnitude of acceleration (\(\|acceleration\| = \sqrt{x^2 + y^2 + z^2}\)).
minmagnitudem/s2The minimum magnitude of acceleration.
avgmagnitudem/s2The average magnitude of acceleration.
medianmagnitudem/s2The median magnitude of acceleration.
stdmagnitudem/s2The standard deviation of acceleration.
+
+

Assumptions/Observations

+
    +
  1. Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem; if this is something you need, get in touch and we can discuss how to implement it.
  2. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/empatica-blood-volume-pulse/index.html b/1.3/features/empatica-blood-volume-pulse/index.html new file mode 100644 index 00000000..5076daea --- /dev/null +++ b/1.3/features/empatica-blood-volume-pulse/index.html @@ -0,0 +1,2039 @@ + + + + + + + + + + + + + + + + + + + + + + Empatica Blood Volume Pulse - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Empatica Blood Volume Pulse

+

Sensor parameters description for [EMPATICA_BLOOD_VOLUME_PULSE]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Name of the CSV file containing blood volume pulse data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.
+

DBDP provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/empatica_blood_volume_pulse_raw.csv 
+- data/raw/{pid}/empatica_blood_volume_pulse_with_datetime.csv
+- data/interim/{pid}/empatica_blood_volume_pulse_features/empatica_blood_volume_pulse_{language}_{provider_key}.csv
+- data/processed/features/{pid}/empatica_blood_volume_pulse.csv
+
+
+

Parameters description for [EMPATICA_BLOOD_VOLUME_PULSE][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract EMPATICA_BLOOD_VOLUME_PULSE features from the DBDP provider
[FEATURES]Features to be computed from blood volume pulse intraday data, see table below
+

Features description for [EMPATICA_BLOOD_VOLUME_PULSE][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxbvp-The maximum blood volume pulse during a time segment.
minbvp-The minimum blood volume pulse during a time segment.
avgbvp-The average blood volume pulse during a time segment.
medianbvp-The median of blood volume pulse during a time segment.
modebvp-The mode of blood volume pulse during a time segment.
stdbvp-The standard deviation of blood volume pulse during a time segment.
diffmaxmodebvp-The difference between the maximum and mode blood volume pulse during a time segment.
diffminmodebvp-The difference between the mode and minimum blood volume pulse during a time segment.
entropybvpnatsShannon’s entropy measurement based on blood volume pulse during a time segment.
+
+

Assumptions/Observations

+

For more information about BVP read this.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/empatica-electrodermal-activity/index.html b/1.3/features/empatica-electrodermal-activity/index.html new file mode 100644 index 00000000..8df2785a --- /dev/null +++ b/1.3/features/empatica-electrodermal-activity/index.html @@ -0,0 +1,2039 @@ + + + + + + + + + + + + + + + + + + + + + + Empatica Electrodermal Activity - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Empatica Electrodermal Activity

+

Sensor parameters description for [EMPATICA_ELECTRODERMAL_ACTIVITY]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Name of the CSV file containing electrodermal activity data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.
+

DBDP provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/empatica_electrodermal_activity_raw.csv
+- data/raw/{pid}/empatica_electrodermal_activity_with_datetime.csv
+- data/interim/{pid}/empatica_electrodermal_activity_features/empatica_electrodermal_activity_{language}_{provider_key}.csv
+- data/processed/features/{pid}/empatica_electrodermal_activity.csv
+
+
+

Parameters description for [EMPATICA_ELECTRODERMAL_ACTIVITY][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract EMPATICA_ELECTRODERMAL_ACTIVITY features from the DBDP provider
[FEATURES]Features to be computed from electrodermal activity intraday data, see table below
+

Features description for [EMPATICA_ELECTRODERMAL_ACTIVITY][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxedamicrosiemensThe maximum electrical conductance during a time segment.
minedamicrosiemensThe minimum electrical conductance during a time segment.
avgedamicrosiemensThe average electrical conductance during a time segment.
medianedamicrosiemensThe median of electrical conductance during a time segment.
modeedamicrosiemensThe mode of electrical conductance during a time segment.
stdedamicrosiemensThe standard deviation of electrical conductance during a time segment.
diffmaxmodeedamicrosiemensThe difference between the maximum and mode electrical conductance during a time segment.
diffminmodeedamicrosiemensThe difference between the mode and minimum electrical conductance during a time segment.
entropyedanatsShannon’s entropy measurement based on electrical conductance during a time segment.
+
+

Assumptions/Observations

+

None

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/empatica-heartrate/index.html b/1.3/features/empatica-heartrate/index.html new file mode 100644 index 00000000..167c362c --- /dev/null +++ b/1.3/features/empatica-heartrate/index.html @@ -0,0 +1,2039 @@ + + + + + + + + + + + + + + + + + + + + + + Empatica Heart Rate - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Empatica Heart Rate

+

Sensor parameters description for [EMPATICA_HEARTRATE]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Name of the CSV file containing heart rate data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.
+

DBDP provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/empatica_heartrate_raw.csv
+- data/raw/{pid}/empatica_heartrate_with_datetime.csv
+- data/interim/{pid}/empatica_heartrate_features/empatica_heartrate_{language}_{provider_key}.csv
+- data/processed/features/{pid}/empatica_heartrate.csv
+
+
+

Parameters description for [EMPATICA_HEARTRATE][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract EMPATICA_HEARTRATE features from the DBDP provider
[FEATURES]Features to be computed from heart rate intraday data, see table below
+

Features description for [EMPATICA_HEARTRATE][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxhrbeatsThe maximum heart rate during a time segment.
minhrbeatsThe minimum heart rate during a time segment.
avghrbeatsThe average heart rate during a time segment.
medianhrbeatsThe median of heart rate during a time segment.
modehrbeatsThe mode of heart rate during a time segment.
stdhrbeatsThe standard deviation of heart rate during a time segment.
diffmaxmodehrbeatsThe difference between the maximum and mode heart rate during a time segment.
diffminmodehrbeatsThe difference between the mode and minimum heart rate during a time segment.
entropyhrnatsShannon’s entropy measurement based on heart rate during a time segment.
+
+

Assumptions/Observations

+

We extract the previous features based on the average heart rate values computed in 10-second windows.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/empatica-inter-beat-interval/index.html b/1.3/features/empatica-inter-beat-interval/index.html new file mode 100644 index 00000000..302daea1 --- /dev/null +++ b/1.3/features/empatica-inter-beat-interval/index.html @@ -0,0 +1,2039 @@ + + + + + + + + + + + + + + + + + + + + + + Empatica Inter Beat Interval - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Empatica Inter Beat Interval

+

Sensor parameters description for [EMPATICA_INTER_BEAT_INTERVAL]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Name of the CSV file containing inter beat interval data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.
+

DBDP provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/empatica_inter_beat_interval_raw.csv
+- data/raw/{pid}/empatica_inter_beat_interval_with_datetime.csv
+- data/interim/{pid}/empatica_inter_beat_interval_features/empatica_inter_beat_interval_{language}_{provider_key}.csv
+- data/processed/features/{pid}/empatica_inter_beat_interval.csv
+
+
+

Parameters description for [EMPATICA_INTER_BEAT_INTERVAL][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract EMPATICA_INTER_BEAT_INTERVAL features from the DBDP provider
[FEATURES]Features to be computed from inter beat interval intraday data, see table below
+

Features description for [EMPATICA_INTER_BEAT_INTERVAL][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxibisecondsThe maximum inter beat interval during a time segment.
minibisecondsThe minimum inter beat interval during a time segment.
avgibisecondsThe average inter beat interval during a time segment.
medianibisecondsThe median of inter beat interval during a time segment.
modeibisecondsThe mode of inter beat interval during a time segment.
stdibisecondsThe standard deviation of inter beat interval during a time segment.
diffmaxmodeibisecondsThe difference between the maximum and mode inter beat interval during a time segment.
diffminmodeibisecondsThe difference between the mode and minimum inter beat interval during a time segment.
entropyibinatsShannon’s entropy measurement based on inter beat interval during a time segment.
+
+

Assumptions/Observations

+

For more information about IBI read this.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/empatica-tags/index.html b/1.3/features/empatica-tags/index.html new file mode 100644 index 00000000..f9cd2594 --- /dev/null +++ b/1.3/features/empatica-tags/index.html @@ -0,0 +1,1906 @@ + + + + + + + + + + + + + + + + + + + + + + Empatica Tags - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Empatica Tags

+

Sensor parameters description for [EMPATICA_TAGS]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Name of the CSV file containing tags data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.
+
+

Note

+ +
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/empatica-temperature/index.html b/1.3/features/empatica-temperature/index.html new file mode 100644 index 00000000..ba54d498 --- /dev/null +++ b/1.3/features/empatica-temperature/index.html @@ -0,0 +1,2039 @@ + + + + + + + + + + + + + + + + + + + + + + Empatica Temperature - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Empatica Temperature

+

Sensor parameters description for [EMPATICA_TEMPERATURE]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Name of the CSV file containing temperature data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.
+

DBDP provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/empatica_temperature_raw.csv
+- data/raw/{pid}/empatica_temperature_with_datetime.csv
+- data/interim/{pid}/empatica_temperature_features/empatica_temperature_{language}_{provider_key}.csv
+- data/processed/features/{pid}/empatica_temperature.csv
+
+
+

Parameters description for [EMPATICA_TEMPERATURE][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract EMPATICA_TEMPERATURE features from the DBDP provider
[FEATURES]Features to be computed from temperature intraday data, see table below
+

Features description for [EMPATICA_TEMPERATURE][PROVIDERS][DBDP]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxtempdegrees CThe maximum temperature during a time segment.
mintempdegrees CThe minimum temperature during a time segment.
avgtempdegrees CThe average temperature during a time segment.
mediantempdegrees CThe median of temperature during a time segment.
modetempdegrees CThe mode of temperature during a time segment.
stdtempdegrees CThe standard deviation of temperature during a time segment.
diffmaxmodetempdegrees CThe difference between the maximum and mode temperature during a time segment.
diffminmodetempdegrees CThe difference between the mode and minimum temperature during a time segment.
entropytempnatsShannon’s entropy measurement based on temperature during a time segment.
+
+

Assumptions/Observations

+

None

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/feature-introduction/index.html b/1.3/features/feature-introduction/index.html new file mode 100644 index 00000000..dae732c2 --- /dev/null +++ b/1.3/features/feature-introduction/index.html @@ -0,0 +1,2034 @@ + + + + + + + + + + + + + + + + + + + + + + Introduction - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Behavioral Features Introduction

+

A behavioral feature is a metric computed from raw sensor data quantifying the behavior of a participant. For example, the time spent at home computed based on location data. These are also known as digital biomarkers.

+

RAPIDS’ config.yaml has a section for each supported device/sensor (e.g., PHONE_ACCELEROMETER, FITBIT_STEPS, EMPATICA_HEARTRATE). These sections follow a similar structure, and they can have one or more feature PROVIDERS, that compute one or more behavioral features. You will modify the parameters of these PROVIDERS to obtain features from different mobile sensors. We’ll use PHONE_ACCELEROMETER as an example to explain this further.

+
+

Hint

+
    +
  • We recommend reading this page if you are using RAPIDS for the first time
  • +
  • All computed sensor features are stored under /data/processed/features on files per sensor, per participant and per study (all participants).
  • +
  • Every time you change any sensor parameters, provider parameters or provider features, all the necessary files will be updated as soon as you execute RAPIDS.
  • +
  • In short, to extract features offered by a provider, you need to set its [COMPUTE] flag to TRUE, configure any of its parameters, and execute RAPIDS.
  • +
+
+

Explaining the config.yaml sensor sections with an example

+

Each sensor section follows the same structure. Click on the numbered markers to know more.

+
PHONE_ACCELEROMETER: # (1)
+
+    CONTAINER: accelerometer # (2)
+
+    PROVIDERS: # (3)
+        RAPIDS:
+            COMPUTE: False # (4)
+            FEATURES: ["maxmagnitude", "minmagnitude", "avgmagnitude", "medianmagnitude", "stdmagnitude"]
+
+            SRC_SCRIPT: src/features/phone_accelerometer/rapids/main.py
+
+        PANDA:
+            COMPUTE: False
+            VALID_SENSED_MINUTES: False
+            FEATURES: # (5)
+                exertional_activity_episode: ["sumduration", "maxduration", "minduration", "avgduration", "medianduration", "stdduration"]
+                nonexertional_activity_episode: ["sumduration", "maxduration", "minduration", "avgduration", "medianduration", "stdduration"]
+
+                        # (6)
+            SRC_SCRIPT: src/features/phone_accelerometer/panda/main.py
+
+
    +
  1. +

    Sensor section

    +

    Each sensor (accelerometer, screen, etc.) of every supported device (smartphone, Fitbit, etc.) has a section in the config.yaml with parameters and feature PROVIDERS.

    +
  2. +
  3. +

    Sensor Parameters.

    +

    Each sensor section has one or more parameters. These are parameters that affect different aspects of how the raw data is pulled, and processed.

    +

    The CONTAINER parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS].

    +

    We explain these parameters in a table at the top of each sensor documentation page.

    +
  4. +
  5. +

    Sensor Providers

    +

    Each object in this list represents a feature PROVIDER. Each sensor can have zero, one, or more providers.

    +

    A PROVIDER is a script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, in which case they are named after their first author, like [PHONE_LOCATIONS][DORYAB].

    +

    In this example, there are two accelerometer feature providers RAPIDS and PANDA.

    +
  6. +
  7. +

    PROVIDER Parameters

    +

    Each PROVIDER has parameters that affect the computation of the behavioral features it offers.

    +

    These parameters include at least a [COMPUTE] flag that you switch to True to extract a provider’s behavioral features.

    +

    We explain every provider’s parameter in a table under the Parameters description heading on each provider documentation page.

    +
  8. +
  9. +

    PROVIDER Features

    +

    Each PROVIDER offers a set of behavioral features.

    +

    These features are grouped in an array for some providers, like those for the RAPIDS provider. For others, they are grouped in a collection of arrays, like those for the PANDA provider.

    +

    In either case, you can delete the features you are not interested in, and they will not be included in the sensor’s output feature file.

    +

    We explain each behavioral feature in a table under the Features description heading on each provider documentation page.

    +
  10. +
  11. +

    PROVIDER script

    +

    Each PROVIDER has a SRC_SCRIPT that points to the script implementing its behavioral features.

    +

    It has to be a relative path from RAPIDS’ root folder and the script’s parent folder should be named after the provider, e.g. panda.

    +
  12. +
+

These are the descriptions of each marker for accessibility:

+
    +
  1. +

    Sensor section

    +

    Each sensor (accelerometer, screen, etc.) of every supported device (smartphone, Fitbit, etc.) has a section in the config.yaml with parameters and feature PROVIDERS.

    +
  2. +
  3. +

    Sensor Parameters.

    +

    Each sensor section has one or more parameters. These are parameters that affect different aspects of how the raw data is pulled, and processed.

    +

    The CONTAINER parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS].

    +

    We explain these parameters in a table at the top of each sensor documentation page.

    +
  4. +
  5. +

    Sensor Providers

    +

    Each object in this list represents a feature PROVIDER. Each sensor can have zero, one, or more providers.

    +

    A PROVIDER is a script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, in which case they are named after their first author, like [PHONE_LOCATIONS][DORYAB].

    +

    In this example, there are two accelerometer feature providers RAPIDS and PANDA.

    +
  6. +
  7. +

    PROVIDER Parameters

    +

    Each PROVIDER has parameters that affect the computation of the behavioral features it offers.

    +

    These parameters include at least a [COMPUTE] flag that you switch to True to extract a provider’s behavioral features.

    +

    We explain every provider’s parameter in a table under the Parameters description heading on each provider documentation page.

    +
  8. +
  9. +

    PROVIDER Features

    +

    Each PROVIDER offers a set of behavioral features.

    +

    These features are grouped in an array for some providers, like those for the RAPIDS provider. For others, they are grouped in a collection of arrays, like those for the PANDA provider.

    +

    In either case, you can delete the features you are not interested in, and they will not be included in the sensor’s output feature file.

    +

    We explain each behavioral feature in a table under the Features description heading on each provider documentation page.

    +
  10. +
  11. +

    PROVIDER script

    +

    Each PROVIDER has a SRC_SCRIPT that points to the script implementing its behavioral features.

    +

    It has to be a relative path from RAPIDS’ root folder and the script’s parent folder should be named after the provider, e.g. panda.

    +
  12. +
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/fitbit-calories-intraday/index.html b/1.3/features/fitbit-calories-intraday/index.html new file mode 100644 index 00000000..0881eac4 --- /dev/null +++ b/1.3/features/fitbit-calories-intraday/index.html @@ -0,0 +1,2128 @@ + + + + + + + + + + + + + + + + + + + + + + Fitbit Calories Intraday - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Fitbit Calories Intraday

+

Sensor parameters description for [FITBIT_CALORIES_INTRADAY]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Container where your calories intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.
+

RAPIDS provider

+
+

Available time segments

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_calories_intraday_raw.csv
+- data/raw/{pid}/fitbit_calories_intraday_with_datetime.csv
+- data/interim/{pid}/fitbit_calories_intraday_features/fitbit_calories_intraday_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_calories_intraday.csv
+
+
+

Parameters description for [FITBIT_CALORIES_INTRADAY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                                               Description
[COMPUTE]Set to True to extract FITBIT_CALORIES_INTRADAY features from the RAPIDS provider
[FEATURES]Features to be computed from calories intraday data, see table below
[EPISODE_TYPE]RAPIDS will compute features for any episodes in this list. There are seven types of episodes defined as consecutive appearances of a label. Four are based on the activity level labels provided by Fitbit: sedentary, lightly active, fairly active, and very active. One is defined by RAPIDS as moderate to vigorous physical activity MVPA episodes that are based on all fairly active, and very active labels. Two are defined by the user based on a threshold that divides low or high MET (metabolic equivalent) episodes.
EPISODE_TIME_THRESHOLDAny consecutive rows of the same [EPISODE_TYPE] will be considered a single episode if the time difference between them is less or equal than this threshold in minutes
[EPISODE_MET_THRESHOLD]Any 1-minute calorie data chunk with a MET value equal or higher than this threshold will be considered a high MET episode and low MET otherwise. The default value is 3
[EPISODE_MVPA_CATEGORIES]The Fitbit level labels that are considered part of a moderate to vigorous physical activity episode. One or more of sedentary, lightly active, fairly active, and very active. The default are fairly active and very active
[EPISODE_REFERENCE_TIME]Reference time for the start/end time features. MIDNIGHT sets the reference time to 00:00 of each day, START_OF_THE_SEGMENT sets the reference time to the start of the time segment (useful when a segment is shorter than a day or spans multiple days)
+

Features description for [FITBIT_CALORIES_INTRADAY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Feature                                                           UnitsDescription
starttimefirstepisodeEPISODE_TYPEminutesStart time of the first episode of type [EPISODE_TYPE]
endtimefirstepisodeEPISODE_TYPEminutesEnd time of the first episode of type [EPISODE_TYPE]
starttimelastepisodeEPISODE_TYPEminutesStart time of the last episode of type [EPISODE_TYPE]
endtimelastepisodeEPISODE_TYPEminutesEnd time of the last episode of type [EPISODE_TYPE]
starttimelongestepisodeEPISODE_TYPEminutesStart time of the longest episode of type [EPISODE_TYPE]
endtimelongestepisodeEPISODE_TYPEminutesEnd time of the longest episode of type [EPISODE_TYPE]
countepisodeEPISODE_TYPEepisodesThe number of episodes of type [EPISODE_TYPE]
sumdurationepisodeEPISODE_TYPEminutesThe sum of the duration of episodes of type [EPISODE_TYPE]
avgdurationepisodeEPISODE_TYPEminutesThe average of the duration of episodes of type [EPISODE_TYPE]
maxdurationepisodeEPISODE_TYPEminutesThe maximum of the duration of episodes of type [EPISODE_TYPE]
mindurationepisodeEPISODE_TYPEminutesThe minimum of the duration of episodes of type [EPISODE_TYPE]
stddurationepisodeEPISODE_TYPEminutesThe standard deviation of the duration of episodes of type [EPISODE_TYPE]
summetEPISODE_TYPEMETsThe sum of all METs during episodes of type [EPISODE_TYPE]
avgmetEPISODE_TYPEMETsThe average of all METs during episodes of type [EPISODE_TYPE]
maxmetEPISODE_TYPEMETsThe maximum of all METs during episodes of type [EPISODE_TYPE]
minmetEPISODE_TYPEMETsThe minimum of all METs during episodes of type [EPISODE_TYPE]
stdmetEPISODE_TYPEMETsThe standard deviation of all METs during episodes of type [EPISODE_TYPE]
sumcaloriesEPISODE_TYPEcaloriesThe sum of all calories during episodes of type [EPISODE_TYPE]
avgcaloriesEPISODE_TYPEcaloriesThe average of all calories during episodes of type [EPISODE_TYPE]
maxcaloriesEPISODE_TYPEcaloriesThe maximum of all calories during episodes of type [EPISODE_TYPE]
mincaloriesEPISODE_TYPEcaloriesThe minimum of all calories during episodes of type [EPISODE_TYPE]
stdcaloriesEPISODE_TYPEcaloriesThe standard deviation of all calories during episodes of type [EPISODE_TYPE]
+
+

Assumptions/Observations

+
    +
  • These features are based on intraday calories data that is usually obtained in 1-minute chunks from Fitbit’s API.
  • +
  • The MET value returned by Fitbit is divided by 10
  • +
  • Take into account that the intraday data returned by Fitbit can contain time series for calories burned inclusive of BMR, tracked activity, and manually logged activities.
  • +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/fitbit-data-yield/index.html b/1.3/features/fitbit-data-yield/index.html new file mode 100644 index 00000000..855afc1f --- /dev/null +++ b/1.3/features/fitbit-data-yield/index.html @@ -0,0 +1,2031 @@ + + + + + + + + + + + + + + + + + + + + + + Fitbit Data Yield - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Fitbit Data Yield

+

We use Fitbit heart rate intraday data to extract data yield features. Fitbit data yield features can be used to remove rows (time segments) that do not contain enough Fitbit data. You should decide what is your “enough” threshold depending on the time a participant was supposed to be wearing their Fitbit, the length of your study, and the rates of missing data that your analysis could handle.

+
+

Why is Fitbit data yield important?

+

Imagine that you want to extract FITBIT_STEPS_SUMMARY features on daily segments (00:00 to 23:59). Let’s say that on day 1 the Fitbit logged 6k as the total step count and the heart rate sensor logged 24 hours of data and on day 2 the Fitbit logged 101 as the total step count and the heart rate sensor logged 2 hours of data. It’s very likely that on day 2 you walked during the other 22 hours so including this day in your analysis could bias your results.

+
+

Sensor parameters description for [FITBIT_DATA_YIELD]:

+ + + + + + + + + + + + + +
Key                   Description
[SENSORS]The Fitbit sensor we considered for calculating the Fitbit data yield features. We only support FITBIT_HEARTRATE_INTRADAY since sleep data is commonly collected only overnight, and step counts are 0 even when not wearing the Fitbit device.
+

RAPIDS provider

+

Before explaining the data yield features, let’s define the following relevant concepts:

+
    +
  • A valid minute is any 60 second window when Fitbit heart rate intraday sensor logged at least 1 row of data
  • +
  • A valid hour is any 60 minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS]
  • +
+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_heartrate_intraday_raw.csv
+- data/raw/{pid}/fitbit_heartrate_intraday_with_datetime.csv
+- data/interim/{pid}/fitbit_data_yield_features/fitbit_data_yield_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_data_yield.csv
+
+
+

Parameters description for [FITBIT_DATA_YIELD][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract FITBIT_DATA_YIELD features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
[MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS]The proportion [0.0 ,1.0] of valid minutes in a 60-minute window necessary to flag that window as valid.
+

Features description for [FITBIT_DATA_YIELD][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
ratiovalidyieldedminutes-The ratio between the number of valid minutes and the duration in minutes of a time segment.
ratiovalidyieldedhours-The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour this feature will always be 1.
+
+

Assumptions/Observations

+
    +
  1. +

    We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together.

    +

    For example, let’s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur:

    +

      +
    1. the 12 missing hours are from the beginning of the segment or
    2. +
    3. 30 minutes could be missing from every hour (24 * 30 minutes = 12 hours).
    4. +

    +

    ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b; in other words, sensed data for b is more evenly spread compared to a).

    +
  2. +
  3. +

    We assume your Fitbit intraday data was sampled (requested from the Fitbit API) at 1 minute intervals, if the interval is longer, for example 15 minutes, you need to take into account that valid minutes and valid hours ratios are going to be small (for example you would have at most 4 “minutes” of data per hour because you would have four 15-minute windows) and so you should adjust your thresholds to include and exclude rows accordingly. If you are in this situation, get in touch with us, we could implement this use case but we are not sure there is enough demand for it at the moment since you can control the sampling rate of the data you request from Fitbit API.

    +
  4. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/fitbit-heartrate-intraday/index.html b/1.3/features/fitbit-heartrate-intraday/index.html new file mode 100644 index 00000000..d471b9a1 --- /dev/null +++ b/1.3/features/fitbit-heartrate-intraday/index.html @@ -0,0 +1,2046 @@ + + + + + + + + + + + + + + + + + + + + + + Fitbit Heart Rate Intraday - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Fitbit Heart Rate Intraday

+

Sensor parameters description for [FITBIT_HEARTRATE_INTRADAY]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Container where your heart rate intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.
+

RAPIDS provider

+
+

Available time segments

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_heartrate_intraday_raw.csv
+- data/raw/{pid}/fitbit_heartrate_intraday_with_datetime.csv
+- data/interim/{pid}/fitbit_heartrate_intraday_features/fitbit_heartrate_intraday_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_heartrate_intraday.csv
+
+
+

Parameters description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract FITBIT_HEARTRATE_INTRADAY features from the RAPIDS provider
[FEATURES]Features to be computed from heart rate intraday data, see table below
+

Features description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxhrbeats/minsThe maximum heart rate during a time segment.
minhrbeats/minsThe minimum heart rate during a time segment.
avghrbeats/minsThe average heart rate during a time segment.
medianhrbeats/minsThe median of heart rate during a time segment.
modehrbeats/minsThe mode of heart rate during a time segment.
stdhrbeats/minsThe standard deviation of heart rate during a time segment.
diffmaxmodehrbeats/minsThe difference between the maximum and mode heart rate during a time segment.
diffminmodehrbeats/minsThe difference between the mode and minimum heart rate during a time segment.
entropyhrnatsShannon’s entropy measurement based on heart rate during a time segment.
minutesonZONEminutesNumber of minutes the user’s heart rate fell within each heartrate_zone during a time segment.
+
+

Assumptions/Observations

+
    +
  1. There are four heart rate zones (ZONE): outofrange, fatburn, cardio, and peak. Please refer to Fitbit documentation for more information about the way they are computed.
  2. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/fitbit-heartrate-summary/index.html b/1.3/features/fitbit-heartrate-summary/index.html new file mode 100644 index 00000000..f52a79b4 --- /dev/null +++ b/1.3/features/fitbit-heartrate-summary/index.html @@ -0,0 +1,2081 @@ + + + + + + + + + + + + + + + + + + + + + + Fitbit Heart Rate Summary - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Fitbit Heart Rate Summary

+

Sensor parameters description for [FITBIT_HEARTRATE_SUMMARY]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Container where your heart rate summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.
+

RAPIDS provider

+
+

Available time segments

+
    +
  • Only available for segments that span 1 or more complete days (e.g. Jan 1st 00:00 to Jan 3rd 23:59)
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_heartrate_summary_raw.csv
+- data/raw/{pid}/fitbit_heartrate_summary_with_datetime.csv
+- data/interim/{pid}/fitbit_heartrate_summary_features/fitbit_heartrate_summary_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_heartrate_summary.csv
+
+
+

Parameters description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract FITBIT_HEARTRATE_SUMMARY features from the RAPIDS provider
[FEATURES]Features to be computed from heart rate summary data, see table below
+

Features description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxrestinghrbeats/minsThe maximum daily resting heart rate during a time segment.
minrestinghrbeats/minsThe minimum daily resting heart rate during a time segment.
avgrestinghrbeats/minsThe average daily resting heart rate during a time segment.
medianrestinghrbeats/minsThe median of daily resting heart rate during a time segment.
moderestinghrbeats/minsThe mode of daily resting heart rate during a time segment.
stdrestinghrbeats/minsThe standard deviation of daily resting heart rate during a time segment.
diffmaxmoderestinghrbeats/minsThe difference between the maximum and mode daily resting heart rate during a time segment.
diffminmoderestinghrbeats/minsThe difference between the mode and minimum daily resting heart rate during a time segment.
entropyrestinghrnatsShannon’s entropy measurement based on daily resting heart rate during a time segment.
sumcaloriesZONEcalsThe total daily calories burned within heartrate_zone during a time segment.
maxcaloriesZONEcalsThe maximum daily calories burned within heartrate_zone during a time segment.
mincaloriesZONEcalsThe minimum daily calories burned within heartrate_zone during a time segment.
avgcaloriesZONEcalsThe average daily calories burned within heartrate_zone during a time segment.
mediancaloriesZONEcalsThe median of daily calories burned within heartrate_zone during a time segment.
stdcaloriesZONEcalsThe standard deviation of daily calories burned within heartrate_zone during a time segment.
entropycaloriesZONEnatsShannon’s entropy measurement based on daily calories burned within heartrate_zone during a time segment.
+
+

Assumptions/Observations

+
    +
  1. +

    There are four heart rate zones (ZONE): outofrange, fatburn, cardio, and peak. Please refer to Fitbit documentation for more information about the way they are computed.

    +
  2. +
  3. +

    Calories’ accuracy depends on the users’ Fitbit profile (weight, height, etc.).

    +
  4. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/fitbit-sleep-intraday/index.html b/1.3/features/fitbit-sleep-intraday/index.html new file mode 100644 index 00000000..5fc96052 --- /dev/null +++ b/1.3/features/fitbit-sleep-intraday/index.html @@ -0,0 +1,2305 @@ + + + + + + + + + + + + + + + + + + + + + + Fitbit Sleep Intraday - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Fitbit Sleep Intraday

+

Sensor parameters description for [FITBIT_SLEEP_INTRADAY]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Container where your sleep intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.
+

RAPIDS provider

+
+

Understanding RAPIDS features

+

This diagram will help you understand how sleep episodes are chunked and grouped within time segments for the RAPIDS provider.

+
+
+

Available time segments

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_sleep_intraday_raw.csv
+- data/raw/{pid}/fitbit_sleep_intraday_with_datetime.csv
+- data/interim/{pid}/fitbit_sleep_intraday_episodes.csv
+- data/interim/{pid}/fitbit_sleep_intraday_episodes_resampled.csv
+- data/interim/{pid}/fitbit_sleep_intraday_episodes_resampled_with_datetime.csv
+- data/interim/{pid}/fitbit_sleep_intraday_features/fitbit_sleep_intraday_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_sleep_intraday.csv
+
+
+

Parameters description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract FITBIT_SLEEP_INTRADAY features from the RAPIDS provider
[FEATURES]Features to be computed from sleep intraday data, see table below
[SLEEP_LEVELS]Fitbit’s sleep API Version 1 only provides CLASSIC records. However, Version 1.2 provides 2 types of records: CLASSIC and STAGES. STAGES is only available in devices with a heart rate sensor and even those devices will fail to report it if the battery is low or the device is not tight enough. While CLASSIC contains 3 sleep levels (awake, restless, and asleep), STAGES contains 4 sleep levels (wake, deep, light, rem). To make it consistent, RAPIDS groups them into 2 UNIFIED sleep levels: awake (CLASSIC: awake and restless; STAGES: wake) and asleep (CLASSIC: asleep; STAGES: deep, light, and rem). In this section, there is a boolean flag named INCLUDE_ALL_GROUPS that if set to TRUE, computes LEVELS_AND_TYPES features grouping all levels together in a single all category.
[SLEEP_TYPES]Types of sleep to be included in the feature extraction computation. There are three sleep types: main, nap, and all. The all type means both main sleep and naps are considered.
+

Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS][LEVELS_AND_TYPES]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Feature                                          UnitsDescription
countepisode[LEVEL][TYPE]episodesNumber of [LEVEL][TYPE]sleep episodes. [LEVEL]is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types.
sumduration[LEVEL][TYPE]minutesTotal duration of all [LEVEL][TYPE]sleep episodes. [LEVEL]is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types.
maxduration[LEVEL][TYPE]minutesLongest duration of any [LEVEL][TYPE]sleep episode. [LEVEL]is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types.
minduration[LEVEL][TYPE]minutesShortest duration of any [LEVEL][TYPE]sleep episode. [LEVEL]is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types.
avgduration[LEVEL][TYPE]minutesAverage duration of all [LEVEL][TYPE]sleep episodes. [LEVEL]is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types.
medianduration[LEVEL][TYPE]minutesMedian duration of all [LEVEL][TYPE]sleep episodes. [LEVEL]is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types.
stdduration[LEVEL][TYPE]minutesStandard deviation duration of all [LEVEL][TYPE]sleep episodes. [LEVEL]is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types.
+

Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [ACROSS_LEVELS]:

+ + + + + + + + + + + + + + + + + + + + +
Feature                           UnitsDescription
ratiocount[LEVEL]-Ratio between the count of episodes of a single sleep [LEVEL] and the count of all episodes of all levels during both main and nap sleep types. This answers the question: what percentage of all wake, deep, light, and rem episodes were rem? (e.g., \(countepisode[remstages][all] / countepisode[all][all]\))
ratioduration[LEVEL]-Ratio between the duration of episodes of a single sleep [LEVEL] and the duration of all episodes of all levels during both main and nap sleep types. This answers the question: what percentage of all wake, deep, light, and rem time was rem? (e.g., \(sumduration[remstages][all] / sumduration[all][all]\))
+

Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [ACROSS_TYPES]:

+ + + + + + + + + + + + + + + + + + + + +
Feature                           UnitsDescription
ratiocountmain-Ratio between the count of all main episodes (independently of the levels inside) divided by the count of all main and nap episodes. This answers the question: what percentage of all sleep episodes (main and nap) were main? We do not provide the ratio for nap because it is complementary. (\(countepisode[all][main] / countepisode[all][all]\))
ratiodurationmain-Ratio between the duration of all main episodes (independently of the levels inside) divided by the duration of all main and nap episodes. This answers the question: what percentage of all sleep time (main and nap) was main? We do not provide the ratio for nap because it is complementary. (\(sumduration[all][main] / sumduration[all][all]\))
+

Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [WITHIN_LEVELS]:

+ + + + + + + + + + + + + + + + + + + + +
Feature                           UnitsDescription
ratiocountmainwithin[LEVEL]-Ratio between the count of episodes of a single sleep [LEVEL] during main sleep divided by the count of episodes of a single sleep [LEVEL] during main and nap. This answers the question: are rem episodes more frequent during main than nap sleep? We do not provide the ratio for nap because it is complementary. (\(countepisode[remstages][main] / countepisode[remstages][all]\))
ratiodurationmainwithin[LEVEL]-Ratio between the duration of episodes of a single sleep [LEVEL] during main sleep divided by the duration of episodes of a single sleep [LEVEL] during main and nap. This answers the question: is rem time more frequent during main than nap sleep? We do not provide the ratio for nap because it is complementary. (\(sumduration[remstages][main] / sumduration[remstages][all]\))
+

Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [WITHIN_TYPES]:

+ + + + + + + + + + + + + + + + + + + + +
Feature                                                   UnitsDescription
ratiocount[LEVEL]within[TYPE]-Ratio between the count of episodes of a single sleep [LEVEL] and the count of all episodes of all levels during either main or nap sleep types. This answers the question: what percentage of all wake, deep, light, and rem episodes were rem during main/nap sleep time? (e.g., \(countepisode[remstages][main] / countepisode[all][main]\))
ratioduration[LEVEL]within[TYPE]-Ratio between the duration of episodes of a single sleep [LEVEL] and the duration of all episodes of all levels during either main or nap sleep types. This answers the question: what percentage of all wake, deep, light, and rem time was rem during main/nap sleep time? (e.g., \(sumduration[remstages][main] / sumduration[all][main]\))
+
+

Assumptions/Observations

+
    +
  1. This diagram will help you understand how sleep episodes are chunked and grouped within time segments for the RAPIDS provider.
  2. +
  3. Features listed in [LEVELS_AND_TYPES] are computed for any levels and types listed in [SLEEP_LEVELS] or [SLEEP_TYPES]. For example if STAGES only contains [rem, light] you will not get countepisode[wake|deep][TYPE] or sum, max, min, avg, median, or std duration. Levels or types in these lists do not influence RATIOS or ROUTINE features.
  4. +
  5. Any [LEVEL] grouping is done within the elements of each class CLASSIC, STAGES, and UNIFIED. That is, we never combine CLASSIC or STAGES types to compute features.
  6. +
  7. The categories for all levels (when INCLUDE_ALL_GROUPS is True) and all SLEEP_TYPES are not considered for RATIOS features as they are always 1.
  8. +
  9. These features can be computed in time segments of any length, but only the 1-minute sleep chunks within each segment instance will be used.
  10. +
+
+

PRICE provider

+
+

Understanding PRICE features

+

This diagram will help you understand how sleep episodes are chunked and grouped within time segments and LNE-LNE intervals for the PRICE provider.

+
+
+

Available time segments

+
    +
  • Available for any time segments larger or equal to one day
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_sleep_intraday_raw.csv
+- data/raw/{pid}/fitbit_sleep_intraday_parsed.csv
+- data/interim/{pid}/fitbit_sleep_intraday_episodes_resampled.csv
+- data/interim/{pid}/fitbit_sleep_intraday_episodes_resampled_with_datetime.csv
+- data/interim/{pid}/fitbit_sleep_intraday_features/fitbit_sleep_intraday_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_sleep_intraday.csv
+
+
+

Parameters description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][PRICE]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                                                                 Description
[COMPUTE]Set to True to extract FITBIT_SLEEP_INTRADAY features from the PRICE provider
[FEATURES]Features to be computed from sleep intraday data, see table below
[SLEEP_LEVELS]Fitbit’s sleep API Version 1 only provides CLASSIC records. However, Version 1.2 provides 2 types of records: CLASSIC and STAGES. STAGES is only available in devices with a heart rate sensor and even those devices will fail to report it if the battery is low or the device is not tight enough. While CLASSIC contains 3 sleep levels (awake, restless, and asleep), STAGES contains 4 sleep levels (wake, deep, light, rem). To make it consistent, RAPIDS groups them into 2 UNIFIED sleep levels: awake (CLASSIC: awake and restless; STAGES: wake) and asleep (CLASSIC: asleep; STAGES: deep, light, and rem). In this section, there is a boolean flag named INCLUDE_ALL_GROUPS that if set to TRUE, computes avgdurationallmain[DAY_TYPE] features grouping all levels together in a single all category.
[DAY_TYPE]The features of this provider can be computed using daily averages/standard deviations that were extracted on WEEKEND days only, WEEK days only, or ALL days
[LAST_NIGHT_END]Only main sleep episodes that start within the LNE-LNE interval [LAST_NIGHT_END, LAST_NIGHT_END + 23H 59M 59S] are taken into account to compute the features described below. [LAST_NIGHT_END] is a number ranging from 0 (midnight) to 1439 (23:59).
+

Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][PRICE]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Feature                                                            UnitsDescription
avgduration[LEVEL]main[DAY_TYPE]minutesAverage duration of daily sleep chunks of a LEVEL. Use the DAY_TYPE flag to include daily durations from weekend days only, weekdays, or both. Use [LEVEL] to group all levels in a single all category.
avgratioduration[LEVEL]withinmain[DAY_TYPE]-Average of the daily ratio between the duration of sleep chunks of a LEVEL and total duration of all main sleep episodes in a day. When INCLUDE_ALL_GROUPS is True the all LEVEL is ignored since this feature is always 1. Use the DAY_TYPE flag to include start times from weekend days only, weekdays, or both.
avgstarttimeofepisodemain[DAY_TYPE]minutesAverage of all start times of the first main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include start times from LNE-LNE intervals that start on weekend days only, weekdays, or both.
avgendtimeofepisodemain[DAY_TYPE]minutesAverage of all end times of the last main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both.
avgmidpointofepisodemain[DAY_TYPE]minutesAverage of all the differences between avgendtime... and avgstarttime.. in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both.
stdstarttimeofepisodemain[DAY_TYPE]minutesStandard deviation of all start times of the first main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include start times from LNE-LNE intervals that start on weekend days only, weekdays, or both.
stdendtimeofepisodemain[DAY_TYPE]minutesStandard deviation of all end times of the last main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both.
stdmidpointofepisodemain[DAY_TYPE]minutesStandard deviation of all the differences between avgendtime... and avgstarttime.. in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both.
socialjetlagminutesDifference in minutes between the avgmidpointofepisodemain of weekends and weekdays that belong to each time segment instance. If your time segment does not contain at least one week day and one weekend day this feature will be NA.
rmssdmeanstarttimeofepisodemainminutesSquare root of the mean squared successive difference (RMSSD) between today’s and yesterday’s starttimeofepisodemain values across the entire participant’s sleep data grouped per time segment instance. It represents the mean of how someone’s starttimeofepisodemain (bedtime) changed from night to night.
rmssdmeanendtimeofepisodemainminutesSquare root of the mean squared successive difference (RMSSD) between today’s and yesterday’s endtimeofepisodemain values across the entire participant’s sleep data grouped per time segment instance. It represents the mean of how someone’s endtimeofepisodemain (wake time) changed from night to night.
rmssdmeanmidpointofepisodemainminutesSquare root of the mean squared successive difference (RMSSD) between today’s and yesterday’s midpointofepisodemain values across the entire participant’s sleep data grouped per time segment instance. It represents the mean of how someone’s midpointofepisodemain (mid time between bedtime and wake time) changed from night to night.
rmssdmedianstarttimeofepisodemainminutesSquare root of the median squared successive difference (RMSSD) between today’s and yesterday’s starttimeofepisodemain values across the entire participant’s sleep data grouped per time segment instance. It represents the median of how someone’s starttimeofepisodemain (bedtime) changed from night to night.
rmssdmedianendtimeofepisodemainminutesSquare root of the median squared successive difference (RMSSD) between today’s and yesterday’s endtimeofepisodemain values across the entire participant’s sleep data grouped per time segment instance. It represents the median of how someone’s endtimeofepisodemain (wake time) changed from night to night.
rmssdmedianmidpointofepisodemainminutesSquare root of the median squared successive difference (RMSSD) between today’s and yesterday’s midpointofepisodemain values across the entire participant’s sleep data grouped per time segment instance. It represents the median of how someone’s midpointofepisodemain (average mid time between bedtime and wake time) changed from night to night.
+
+

Assumptions/Observations

+
    +
  1. This diagram will help you understand how sleep episodes are chunked and grouped within time segments and LNE-LNE intervals for the PRICE provider.
  2. +
  3. We recommend you use periodic segments that start in the morning so RAPIDS can chunk and group sleep episodes overnight. Shifted segments (as any other segments) are labelled based on their start and end date times.
  4. +
  5. avgstarttime... and avgendtime... are roughly equivalent to an average bed and awake time only if you are using shifted segments.
  6. +
  7. The features of this provider are only available on time segments that are longer than 24 hours because they are based on descriptive statistics computed across daily values.
  8. +
  9. Even though Fitbit provides 2 types of sleep episodes (main and nap), only main sleep episodes are considered.
  10. +
  11. The reference point for all times is 00:00 of the first day in the LNE-LNE interval.
  12. +
  13. Sleep episodes are formed by 1-minute chunks that we group overnight starting from today’s LNE and ending on tomorrow’s LNE or the end of that segment (whatever is first).
  14. +
  15. The features avgstarttime... and avgendtime... are the average of the first and last sleep episode across every LNE-LNE interval within a segment (avgmidtime... is the mid point between start and end). Therefore, only segments longer than 24hrs will be averaged across more than one LNE-LNE interval.
  16. +
  17. socialjetlag is only available on segment instances equal or longer than 48hrs that contain at least one weekday day and one weekend day, for example seven-day (weekly) segments.
  18. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/fitbit-sleep-summary/index.html b/1.3/features/fitbit-sleep-summary/index.html new file mode 100644 index 00000000..ced4d40a --- /dev/null +++ b/1.3/features/fitbit-sleep-summary/index.html @@ -0,0 +1,2099 @@ + + + + + + + + + + + + + + + + + + + + + + Fitbit Sleep Summary - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Fitbit Sleep Summary

+

Sensor parameters description for [FITBIT_SLEEP_SUMMARY]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Container where your sleep summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.
+

RAPIDS provider

+
+

Understanding RAPIDS features

+

This diagram will help you understand how sleep episodes are chunked and grouped within time segments using SLEEP_SUMMARY_LAST_NIGHT_END for the RAPIDS provider.

+
+
+

Available time segments

+
    +
  • Only available for segments that span 1 or more complete days (e.g. Jan 1st 00:00 to Jan 3rd 23:59)
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_sleep_summary_raw.csv
+- data/raw/{pid}/fitbit_sleep_summary_with_datetime.csv
+- data/interim/{pid}/fitbit_sleep_summary_features/fitbit_sleep_summary_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_sleep_summary.csv
+
+
+

Parameters description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract FITBIT_SLEEP_SUMMARY features from the RAPIDS provider
[SLEEP_TYPES]Types of sleep to be included in the feature extraction computation. There are three sleep types: main, nap, and all. The all type means both main sleep and naps are considered.
[FEATURES]Features to be computed from sleep summary data, see table below
[FITBIT_DATA_STREAMS][data stream][SLEEP_SUMMARY_LAST_NIGHT_END]As an exception, the LAST_NIGHT_END parameter for this provider is in the data stream configuration section. This parameter controls how sleep episodes are assigned to different days and affects wake and bedtimes.
+

Features description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
firstwaketimeTYPEminutesFirst wake time for a certain sleep type during a time segment. Wake time is number of minutes after midnight of a sleep episode’s end time.
lastwaketimeTYPEminutesLast wake time for a certain sleep type during a time segment. Wake time is number of minutes after midnight of a sleep episode’s end time.
firstbedtimeTYPEminutesFirst bedtime for a certain sleep type during a time segment. Bedtime is number of minutes after midnight of a sleep episode’s start time.
lastbedtimeTYPEminutesLast bedtime for a certain sleep type during a time segment. Bedtime is number of minutes after midnight of a sleep episode’s start time.
countepisodeTYPEepisodesNumber of sleep episodes for a certain sleep type during a time segment.
avgefficiencyTYPEscoresAverage sleep efficiency for a certain sleep type during a time segment.
sumdurationafterwakeupTYPEminutesTotal duration the user stayed in bed after waking up for a certain sleep type during a time segment.
sumdurationasleepTYPEminutesTotal sleep duration for a certain sleep type during a time segment.
sumdurationawakeTYPEminutesTotal duration the user stayed awake but still in bed for a certain sleep type during a time segment.
sumdurationtofallasleepTYPEminutesTotal duration the user spent to fall asleep for a certain sleep type during a time segment.
sumdurationinbedTYPEminutesTotal duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment.
avgdurationafterwakeupTYPEminutesAverage duration the user stayed in bed after waking up for a certain sleep type during a time segment.
avgdurationasleepTYPEminutesAverage sleep duration for a certain sleep type during a time segment.
avgdurationawakeTYPEminutesAverage duration the user stayed awake but still in bed for a certain sleep type during a time segment.
avgdurationtofallasleepTYPEminutesAverage duration the user spent to fall asleep for a certain sleep type during a time segment.
avgdurationinbedTYPEminutesAverage duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment.
+
+

Assumptions/Observations

+
    +
  1. This diagram will help you understand how sleep episodes are chunked and grouped within time segments using LNE for the RAPIDS provider.
  2. +
  3. There are three sleep types (TYPE): main, nap, all. The all type groups both main sleep and naps. All types are based on Fitbit’s labels.
  4. +
  5. There are two versions of Fitbit’s sleep API (version 1 and version 1.2), and each provides raw sleep data in a different format:
      +
    • Count & duration summaries. v1 contains count_awake, duration_awake, count_awakenings, count_restless, and duration_restless fields for every sleep record but v1.2 does not.
    • +
    +
  6. +
  7. API columns. Most features are computed based on the values provided by Fitbit’s API: efficiency, minutes_after_wakeup, minutes_asleep, minutes_awake, minutes_to_fall_asleep, minutes_in_bed, is_main_sleep and type.
  8. +
  9. Bed time and sleep duration are based on episodes that started between today’s LNE and tomorrow’s LNE while awake time is based on the episodes that started between yesterday’s LNE and today’s LNE
  10. +
  11. The reference point for bed/awake times is today’s 00:00. You can have bedtimes larger than 24 and awake times smaller than 0
  12. +
  13. These features are only available for time segments that span midnight to midnight of the same or different day.
  14. +
  15. We include first and last wake and bedtimes because, when LAST_NIGHT_END is 10 am, the first bedtime could match a nap at 2 pm, and the last bedtime could match a main overnight sleep episode that starts at 10pm.
  16. +
  17. Set the value for SLEEP_SUMMARY_LAST_NIGHT_END in the config parameter [FITBIT_DATA_STREAMS][data stream][SLEEP_SUMMARY_LAST_NIGHT_END].
  18. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/fitbit-steps-intraday/index.html b/1.3/features/fitbit-steps-intraday/index.html new file mode 100644 index 00000000..5896e8e7 --- /dev/null +++ b/1.3/features/fitbit-steps-intraday/index.html @@ -0,0 +1,2095 @@ + + + + + + + + + + + + + + + + + + + + + + Fitbit Steps Intraday - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Fitbit Steps Intraday

+

Sensor parameters description for [FITBIT_STEPS_INTRADAY]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Container where your steps intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.
[EXCLUDE_SLEEP]Step data will be excluded if it was logged during sleep periods when at least one [EXCLUDE] flag is set to True. Sleep can be delimited by (1) a fixed period that repeats on every day if [TIME_BASED][EXCLUDE] is True or (2) by Fitbit summary sleep episodes if [FITBIT_BASED][EXCLUDE] is True. If both are True (3), we use all Fitbit sleep episodes as well as the time-based episodes that do not overlap with any Fitbit episodes. If [FITBIT_BASED][EXCLUDE] is True, make sure the Fitbit sleep summary container points to a valid table or file.
+

RAPIDS provider

+
+

Available time segments

+
    +
  • Available for all time segments
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_steps_intraday_raw.csv
+- data/raw/{pid}/fitbit_steps_intraday_with_datetime.csv
+- data/raw/{pid}/fitbit_sleep_summary_raw.csv (Only when [EXCLUDE_SLEEP][EXCLUDE]=True and [EXCLUDE_SLEEP][TYPE]=FITBIT_BASED)
+- data/interim/{pid}/fitbit_steps_intraday_with_datetime_exclude_sleep.csv (Only when [EXCLUDE_SLEEP][EXCLUDE]=True)
+- data/interim/{pid}/fitbit_steps_intraday_features/fitbit_steps_intraday_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_steps_intraday.csv
+
+
+

Parameters description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract FITBIT_STEPS_INTRADAY features from the RAPIDS provider
[FEATURES]Features to be computed from steps intraday data, see table below
[THRESHOLD_ACTIVE_BOUT]Every minute with Fitbit steps data will be labelled as sedentary if its step count is below this threshold, otherwise, active.
[INCLUDE_ZERO_STEP_ROWS]Whether or not to include time segments with a 0 step count during the whole day.
+

Features description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
sumstepsstepsThe total step count during a time segment.
maxstepsstepsThe maximum step count during a time segment.
minstepsstepsThe minimum step count during a time segment.
avgstepsstepsThe average step count during a time segment.
stdstepsstepsThe standard deviation of step count during a time segment.
countepisodesedentaryboutboutsNumber of sedentary bouts during a time segment.
sumdurationsedentaryboutminutesTotal duration of all sedentary bouts during a time segment.
maxdurationsedentaryboutminutesThe maximum duration of any sedentary bout during a time segment.
mindurationsedentaryboutminutesThe minimum duration of any sedentary bout during a time segment.
avgdurationsedentaryboutminutesThe average duration of sedentary bouts during a time segment.
stddurationsedentaryboutminutesThe standard deviation of the duration of sedentary bouts during a time segment.
countepisodeactiveboutboutsNumber of active bouts during a time segment.
sumdurationactiveboutminutesTotal duration of all active bouts during a time segment.
maxdurationactiveboutminutesThe maximum duration of any active bout during a time segment.
mindurationactiveboutminutesThe minimum duration of any active bout during a time segment.
avgdurationactiveboutminutesThe average duration of active bouts during a time segment.
stddurationactiveboutminutesThe standard deviation of the duration of active bouts during a time segment.
+
+

Assumptions/Observations

+
    +
  1. Active and sedentary bouts. If the step count per minute is smaller than THRESHOLD_ACTIVE_BOUT (default value is 10), that minute is labelled as sedentary, otherwise, it is labelled as active. Active and sedentary bouts are periods of consecutive minutes labelled as active or sedentary.
  2. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/fitbit-steps-summary/index.html b/1.3/features/fitbit-steps-summary/index.html new file mode 100644 index 00000000..fe26bb7a --- /dev/null +++ b/1.3/features/fitbit-steps-summary/index.html @@ -0,0 +1,2019 @@ + + + + + + + + + + + + + + + + + + + + + + Fitbit Steps Summary - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Fitbit Steps Summary

+

Sensor parameters description for [FITBIT_STEPS_SUMMARY]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Container where your steps summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.
+

RAPIDS provider

+
+

Available time segments

+
    +
  • Only available for segments that span 1 or more complete days (e.g. Jan 1st 00:00 to Jan 3rd 23:59)
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/fitbit_steps_summary_raw.csv
+- data/raw/{pid}/fitbit_steps_summary_with_datetime.csv
+- data/interim/{pid}/fitbit_steps_summary_features/fitbit_steps_summary_{language}_{provider_key}.csv
+- data/processed/features/{pid}/fitbit_steps_summary.csv
+
+
+

Parameters description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract FITBIT_STEPS_SUMMARY features from the RAPIDS provider
[FEATURES]Features to be computed from steps summary data, see table below
+

Features description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxsumstepsstepsThe maximum daily step count during a time segment.
minsumstepsstepsThe minimum daily step count during a time segment.
avgsumstepsstepsThe average daily step count during a time segment.
mediansumstepsstepsThe median of daily step count during a time segment.
stdsumstepsstepsThe standard deviation of daily step count during a time segment.
+
+

Assumptions/Observations

+

NA

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-accelerometer/index.html b/1.3/features/phone-accelerometer/index.html new file mode 100644 index 00000000..e8dba808 --- /dev/null +++ b/1.3/features/phone-accelerometer/index.html @@ -0,0 +1,2121 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Accelerometer - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Accelerometer

+

Sensor parameters description for [PHONE_ACCELEROMETER]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the accelerometer data is stored
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_accelerometer_raw.csv
+- data/raw/{pid}/phone_accelerometer_with_datetime.csv
+- data/interim/{pid}/phone_accelerometer_features/phone_accelerometer_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_accelerometer.csv
+
+
+

Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_ACCELEROMETER features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
+

Features description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
maxmagnitudem/s2The maximum magnitude of acceleration (\(\|acceleration\| = \sqrt{x^2 + y^2 + z^2}\)).
minmagnitudem/s2The minimum magnitude of acceleration.
avgmagnitudem/s2The average magnitude of acceleration.
medianmagnitudem/s2The median magnitude of acceleration.
stdmagnitudem/s2The standard deviation of acceleration.
+
+

Assumptions/Observations

+
    +
  1. Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem.
  2. +
+
+

PANDA provider

+

These features are based on the work by Panda et al.

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_accelerometer_raw.csv
+- data/raw/{pid}/phone_accelerometer_with_datetime.csv
+- data/interim/{pid}/phone_accelerometer_features/phone_accelerometer_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_accelerometer.csv
+
+
+

Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_ACCELEROMETER features from the PANDA provider
[FEATURES]Features to be computed for exertional and non-exertional activity episodes, see table below
+

Features description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
sumdurationminutesTotal duration of all exertional or non-exertional activity episodes.
maxdurationminutesLongest duration of any exertional or non-exertional activity episode.
mindurationminutesShortest duration of any exertional or non-exertional activity episode.
avgdurationminutesAverage duration of any exertional or non-exertional activity episode.
mediandurationminutesMedian duration of any exertional or non-exertional activity episode.
stddurationminutesStandard deviation of the duration of all exertional or non-exertional activity episodes.
+
+

Assumptions/Observations

+
    +
  1. Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes, it is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem.
  2. +
  3. See Panda et al for a definition of exertional and non-exertional activity episodes
  4. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-activity-recognition/index.html b/1.3/features/phone-activity-recognition/index.html new file mode 100644 index 00000000..a3f719a3 --- /dev/null +++ b/1.3/features/phone-activity-recognition/index.html @@ -0,0 +1,2096 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Activity Recognition - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Activity Recognition

+

Sensor parameters description for [PHONE_ACTIVITY_RECOGNITION]:

+ + + + + + + + + + + + + + + + + + + + + +
Key                                                              Description
[CONTAINER][ANDROID]Data stream container (database table, CSV file, etc.) where the activity data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS)
[CONTAINER][IOS]Data stream container (database table, CSV file, etc.) where the activity data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS)
[EPISODE_THRESHOLD_BETWEEN_ROWS]Difference in minutes between any two rows for them to be considered part of the same activity episode
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_activity_recognition_raw.csv
+- data/raw/{pid}/phone_activity_recognition_with_datetime.csv
+- data/interim/{pid}/phone_activity_recognition_episodes.csv
+- data/interim/{pid}/phone_activity_recognition_episodes_resampled.csv
+- data/interim/{pid}/phone_activity_recognition_episodes_resampled_with_datetime.csv
+- data/interim/{pid}/phone_activity_recognition_features/phone_activity_recognition_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_activity_recognition.csv
+
+
+

Parameters description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_ACTIVITY_RECOGNITION features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
[ACTIVITY_CLASSES][STATIONARY]An array of the activity labels to be considered in the STATIONARY category choose any of still, tilting
[ACTIVITY_CLASSES][MOBILE]An array of the activity labels to be considered in the MOBILE category choose any of on_foot, walking, running, on_bicycle
[ACTIVITY_CLASSES][VEHICLE]An array of the activity labels to be considered in the VEHICLE category choose any of in_vehicle
+

Features description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countrowsNumber of episodes.
mostcommonactivityactivity typeThe most common activity type (e.g. still, on_foot, etc.). If there is a tie, the first one is chosen.
countuniqueactivitiesactivity typeNumber of unique activities.
durationstationaryminutesThe total duration of [ACTIVITY_CLASSES][STATIONARY] episodes
durationmobileminutesThe total duration of [ACTIVITY_CLASSES][MOBILE] episodes of on foot, running, and on bicycle activities
durationvehicleminutesThe total duration of [ACTIVITY_CLASSES][VEHICLE] episodes of on vehicle activity
+
+

Assumptions/Observations

+
    +
  1. +

    iOS Activity Recognition names and types are unified with Android labels:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    iOS Activity NameAndroid Activity NameAndroid Activity Type
    walkingwalking7
    runningrunning8
    cyclingon_bicycle1
    automotivein_vehicle0
    stationarystill3
    unknownunknown4
    +
  2. +
  3. +

    In AWARE, Activity Recognition data for Android and iOS are stored in two different database tables; RAPIDS automatically infers what platform each participant belongs to based on their participant file.

    +
  4. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-applications-crashes/index.html b/1.3/features/phone-applications-crashes/index.html new file mode 100644 index 00000000..854b2dbb --- /dev/null +++ b/1.3/features/phone-applications-crashes/index.html @@ -0,0 +1,1919 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Applications Crashes - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Applications Crashes

+

Sensor parameters description for [PHONE_APPLICATIONS_CRASHES]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the applications crashes data is stored
[APPLICATION_CATEGORIES][CATALOGUE_SOURCE]FILE or GOOGLE. If FILE, app categories (genres) are read from [CATALOGUE_FILE]. If [GOOGLE], app categories (genres) are scraped from the Play Store
[APPLICATION_CATEGORIES][CATALOGUE_FILE]CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv
[APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE]if [CATALOGUE_SOURCE] is equal to FILE, this flag signals whether or not to update [CATALOGUE_FILE], if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE]
[APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES]This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE]. If [CATALOGUE_SOURCE] is equal to GOOGLE, all genres are scraped anyway (this flag is ignored)
+
+

Note

+

No feature providers have been implemented for this sensor yet, however you can use its key (PHONE_APPLICATIONS_CRASHES) to improve PHONE_DATA_YIELD or you can implement your own features.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-applications-foreground/index.html b/1.3/features/phone-applications-foreground/index.html new file mode 100644 index 00000000..7f295a0a --- /dev/null +++ b/1.3/features/phone-applications-foreground/index.html @@ -0,0 +1,2055 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Applications Foreground - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Applications Foreground

+

Sensor parameters description for [PHONE_APPLICATIONS_FOREGROUND] (these parameters are used by the only provider available at the moment, RAPIDS):

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the applications foreground data is stored
[APPLICATION_CATEGORIES][CATALOGUE_SOURCE]FILE or GOOGLE. If FILE, app categories (genres) are read from [CATALOGUE_FILE]. If [GOOGLE], app categories (genres) are scraped from the Play Store
[APPLICATION_CATEGORIES][CATALOGUE_FILE]CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv
[APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE]if [CATALOGUE_SOURCE] is equal to FILE, this flag signals whether or not to update [CATALOGUE_FILE], if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE]
[APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES]This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE]. If [CATALOGUE_SOURCE] is equal to GOOGLE, all genres are scraped anyway (this flag is ignored)
+

RAPIDS provider

+

The app category (genre) catalogue used in these features was originally created by Stachl et al.

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android only
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_applications_foreground_raw.csv
+- data/raw/{pid}/phone_applications_foreground_with_datetime.csv
+- data/raw/{pid}/phone_applications_foreground_with_datetime_with_categories.csv
+- data/interim/{pid}/phone_applications_foreground_features/phone_applications_foreground_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_applications_foreground.csv
+
+
+

Parameters description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                                        Description
[COMPUTE]Set to True to extract PHONE_APPLICATIONS_FOREGROUND features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
[SINGLE_CATEGORIES]An array of app categories to be included in the feature extraction computation. The special keyword all represents a category with all the apps from each participant. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above)
[MULTIPLE_CATEGORIES]An array of collections representing meta-categories (a group of categories). They key of each element is the name of the meta-category and the value is an array of member app categories. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above)
[SINGLE_APPS]An array of apps to be included in the feature extraction computation. Use their package name (e.g. com.google.android.youtube) or the reserved keyword top1global (the most used app by a participant over the whole monitoring study)
[EXCLUDED_CATEGORIES]An array of app categories to be excluded from the feature extraction computation. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above)
[EXCLUDED_APPS]An array of apps to be excluded from the feature extraction computation. Use their package name, for example: com.google.android.youtube
+

Features description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countappsNumber of times a single app or apps within a category were used (i.e. they were brought to the foreground either by tapping their icon or switching to it from another app)
timeoffirstuseminutesThe time in minutes between 12:00am (midnight) and the first use of a single app or apps within a category during a time_segment
timeoflastuseminutesThe time in minutes between 12:00am (midnight) and the last use of a single app or apps within a category during a time_segment
frequencyentropynatsThe entropy of the used apps within a category during a time_segment (each app is seen as a unique event, the more apps were used, the higher the entropy). This is especially relevant when computed over all apps. Entropy cannot be obtained for a single app
+
+

Assumptions/Observations

+

Features can be computed by app, by apps grouped under a single category (genre) and by multiple categories grouped together (meta-categories). For example, we can get features for Facebook (single app), for Social Network apps (a category including Facebook and other social media apps) or for Social (a meta-category formed by Social Network and Social Media Tools categories).

+

Apps installed by default like YouTube are considered systems apps on some phones. We do an exact match to exclude apps where “genre” == EXCLUDED_CATEGORIES or “package_name” == EXCLUDED_APPS.

+

We provide three ways of classifying an app within a category (genre): a) by automatically scraping its official category from the Google Play Store, b) by using the catalogue created by Stachl et al. which we provide in RAPIDS (data/external/stachl_application_genre_catalogue.csv), or c) by manually creating a personalized catalogue. You can choose a, b or c by modifying [APPLICATION_GENRES] keys and values (see the Sensor parameters description table above).

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-applications-notifications/index.html b/1.3/features/phone-applications-notifications/index.html new file mode 100644 index 00000000..d59e3684 --- /dev/null +++ b/1.3/features/phone-applications-notifications/index.html @@ -0,0 +1,1919 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Applications Notifications - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Applications Notifications

+

Sensor parameters description for [PHONE_APPLICATIONS_NOTIFICATIONS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the applications notifications data is stored
[APPLICATION_CATEGORIES][CATALOGUE_SOURCE]FILE or GOOGLE. If FILE, app categories (genres) are read from [CATALOGUE_FILE]. If [GOOGLE], app categories (genres) are scraped from the Play Store
[APPLICATION_CATEGORIES][CATALOGUE_FILE]CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv
[APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE]if [CATALOGUE_SOURCE] is equal to FILE, this flag signals whether or not to update [CATALOGUE_FILE], if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE]
[APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES]This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE]. If [CATALOGUE_SOURCE] is equal to GOOGLE, all genres are scraped anyway (this flag is ignored)
+
+

Note

+

No feature providers have been implemented for this sensor yet, however you can use its key (PHONE_APPLICATIONS_NOTIFICATIONS) to improve PHONE_DATA_YIELD or you can implement your own features.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-battery/index.html b/1.3/features/phone-battery/index.html new file mode 100644 index 00000000..38430ec1 --- /dev/null +++ b/1.3/features/phone-battery/index.html @@ -0,0 +1,2034 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Battery - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Battery

+

Sensor parameters description for [PHONE_BATTERY]:

+ + + + + + + + + + + + + + + + + +
Key                                                              Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the battery data is stored
[EPISODE_THRESHOLD_BETWEEN_ROWS]Difference in minutes between any two rows for them to be considered part of the same battery charge or discharge episode
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_battery_raw.csv
+- data/interim/{pid}/phone_battery_episodes.csv
+- data/interim/{pid}/phone_battery_episodes_resampled.csv
+- data/interim/{pid}/phone_battery_episodes_resampled_with_datetime.csv
+- data/interim/{pid}/phone_battery_features/phone_battery_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_battery.csv
+
+
+

Parameters description for [PHONE_BATTERY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_BATTERY features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
+

Features description for [PHONE_BATTERY][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countdischargeepisodesNumber of discharging episodes.
sumdurationdischargeminutesThe total duration of all discharging episodes.
countchargeepisodesNumber of battery charging episodes.
sumdurationchargeminutesThe total duration of all charging episodes.
avgconsumptionrateepisodes/minutesThe average of all episodes’ consumption rates. An episode’s consumption rate is defined as the ratio between its battery delta and duration
maxconsumptionrateepisodes/minutesThe highest of all episodes’ consumption rates. An episode’s consumption rate is defined as the ratio between its battery delta and duration
+
+

Assumptions/Observations

+
    +
  1. We convert battery data collected with iOS client v1 (autodetected because battery status 4 does not exist) to match Android battery format: we swap status 3 for 5 and 1 for 3
  2. +
  3. We group battery data into discharge or charge episodes considering any contiguous rows with consecutive reductions or increases of the battery level if they are logged within [EPISODE_THRESHOLD_BETWEEN_ROWS] minutes from each other.
  4. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-bluetooth/index.html b/1.3/features/phone-bluetooth/index.html new file mode 100644 index 00000000..3130edb7 --- /dev/null +++ b/1.3/features/phone-bluetooth/index.html @@ -0,0 +1,2195 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Bluetooth - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Bluetooth

+

Sensor parameters description for [PHONE_BLUETOOTH]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the bluetooth data is stored
+

RAPIDS provider

+
+

Warning

+

The features of this provider are deprecated in favor of DORYAB provider (see below).

+
+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android only
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_bluetooth_raw.csv
+- data/raw/{pid}/phone_bluetooth_with_datetime.csv
+- data/interim/{pid}/phone_bluetooth_features/phone_bluetooth_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_bluetooth.csv"
+
+
+

Parameters description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_BLUETOOTH features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
+

Features description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countscansdevicesNumber of scanned devices during a time segment, a device can be detected multiple times over time and these appearances are counted separately
uniquedevicesdevicesNumber of unique devices during a time segment as identified by their hardware (bt_address) address
countscansmostuniquedevicescansNumber of scans of the most sensed device within each time segment instance
+
+

Assumptions/Observations

+
    +
  • From v0.2.0 countscans, uniquedevices, countscansmostuniquedevice were deprecated because they overlap with the respective features for ALL devices of the PHONE_BLUETOOTH DORYAB provider
  • +
+
+

DORYAB provider

+

This provider is adapted from the work by Doryab et al.

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android only
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_bluetooth_raw.csv
+- data/raw/{pid}/phone_bluetooth_with_datetime.csv
+- data/interim/{pid}/phone_bluetooth_features/phone_bluetooth_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_bluetooth.csv"
+
+
+

Parameters description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_BLUETOOTH features from the DORYAB provider
[FEATURES]Features to be computed, see table below. These features are computed for three device categories: all devices, own devices and other devices.
+

Features description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Feature                                                                                  UnitsDescription
countscansscansNumber of scans (rows) from the devices sensed during a time segment instance. The more scans a bluetooth device has the longer it remained within range of the participant’s phone
uniquedevicesdevicesNumber of unique bluetooth devices sensed during a time segment instance as identified by their hardware addresses (bt_address)
meanscansscansMean of the scans of every sensed device within each time segment instance
stdscansscansStandard deviation of the scans of every sensed device within each time segment instance
countscansmostfrequentdevicewithinsegmentsscansNumber of scans of the most sensed device within each time segment instance
countscansleastfrequentdevicewithinsegmentsscansNumber of scans of the least sensed device within each time segment instance
countscansmostfrequentdeviceacrosssegmentsscansNumber of scans of the most sensed device across time segment instances of the same type
countscansleastfrequentdeviceacrosssegmentsscansNumber of scans of the least sensed device across time segment instances of the same type per device
countscansmostfrequentdeviceacrossdatasetscansNumber of scans of the most sensed device across the entire dataset of every participant
countscansleastfrequentdeviceacrossdatasetscansNumber of scans of the least sensed device across the entire dataset of every participant
+
+

Assumptions/Observations

+
    +
  • Devices are classified as belonging to the participant (own) or to other people (others) using k-means based on the number of times and the number of days each device was detected across each participant’s dataset. See Doryab et al for more details.
  • +
  • If ownership cannot be computed because all devices were detected on only one day, they are all considered as other. Thus all and other features will be equal. The likelihood of this scenario decreases the more days of data you have.
  • +
  • The most and least frequent devices will be the same across time segment instances and across the entire dataset when every time segment instance covers every hour of a dataset. For example, daily segments (00:00 to 23:59) fall in this category but morning segments (06:00am to 11:59am) or periodic 30-minute segments don’t.
  • +
+
Example
Simplified raw bluetooth data

The following is a simplified example with bluetooth data from three days and two time segments: morning and afternoon. There are two own devices: 55C836F5-487E-405F-8E28-21DBD40FA4FF detected seven times across two days and 499A1EAF-DDF1-4657-986C-EA5032104448 detected eight times on a single day. +

local_date  segment     bt_address                              own_device
+2016-11-29  morning     55C836F5-487E-405F-8E28-21DBD40FA4FF              1
+2016-11-29  morning     55C836F5-487E-405F-8E28-21DBD40FA4FF              1
+2016-11-29  morning     55C836F5-487E-405F-8E28-21DBD40FA4FF              1
+2016-11-29  morning     55C836F5-487E-405F-8E28-21DBD40FA4FF              1
+2016-11-29  morning     48872A52-68DE-420D-98DA-73339A1C4685              0
+2016-11-29  afternoon   55C836F5-487E-405F-8E28-21DBD40FA4FF              1
+2016-11-29  afternoon   48872A52-68DE-420D-98DA-73339A1C4685              0
+2016-11-30  morning     55C836F5-487E-405F-8E28-21DBD40FA4FF              1
+2016-11-30  morning     48872A52-68DE-420D-98DA-73339A1C4685              0
+2016-11-30  morning     25262DC7-780C-4AD5-AD3A-D9776AEF7FC1              0
+2016-11-30  morning     5B1E6981-2E50-4D9A-99D8-67AED430C5A8              0
+2016-11-30  morning     5B1E6981-2E50-4D9A-99D8-67AED430C5A8              0
+2016-11-30  afternoon   55C836F5-487E-405F-8E28-21DBD40FA4FF              1
+2017-05-07  morning     5C5A9C41-2F68-4CEB-96D0-77DE3729B729              0
+2017-05-07  morning     25262DC7-780C-4AD5-AD3A-D9776AEF7FC1              0
+2017-05-07  morning     5B1E6981-2E50-4D9A-99D8-67AED430C5A8              0
+2017-05-07  morning     6C444841-FE64-4375-BC3F-FA410CDC0AC7              0
+2017-05-07  morning     4DC7A22D-9F1F-4DEF-8576-086910AABCB5              0
+2017-05-07  afternoon   5B1E6981-2E50-4D9A-99D8-67AED430C5A8              0
+2017-05-07  afternoon   499A1EAF-DDF1-4657-986C-EA5032104448              1
+2017-05-07  afternoon   499A1EAF-DDF1-4657-986C-EA5032104448              1
+2017-05-07  afternoon   499A1EAF-DDF1-4657-986C-EA5032104448              1
+2017-05-07  afternoon   499A1EAF-DDF1-4657-986C-EA5032104448              1
+2017-05-07  afternoon   499A1EAF-DDF1-4657-986C-EA5032104448              1
+2017-05-07  afternoon   499A1EAF-DDF1-4657-986C-EA5032104448              1
+2017-05-07  afternoon   499A1EAF-DDF1-4657-986C-EA5032104448              1
+2017-05-07  afternoon   499A1EAF-DDF1-4657-986C-EA5032104448              1
+

+
+
The most and least frequent OTHER devices (own_device == 0) during morning segments

The most and least frequent ALL|OWN|OTHER devices are computed within each time segment instance, across time segment instances of the same type and across the entire dataset of each person. These are the most and least frequent devices for OTHER devices during morning segments. +

most frequent device across 2016-11-29 morning:   '48872A52-68DE-420D-98DA-73339A1C4685'  (this device is the only one in this instance)
+least frequent device across 2016-11-29 morning:  '48872A52-68DE-420D-98DA-73339A1C4685'  (this device is the only one in this instance)
+most frequent device across 2016-11-30 morning:   '5B1E6981-2E50-4D9A-99D8-67AED430C5A8'
+least frequent device across 2016-11-30 morning:  '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1'  (when tied, the first occurrence is chosen)
+most frequent device across 2017-05-07 morning:   '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1'  (when tied, the first occurrence is chosen)
+least frequent device across 2017-05-07 morning:  '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1'  (when tied, the first occurrence is chosen)
+
+most frequent across morning segments:            '5B1E6981-2E50-4D9A-99D8-67AED430C5A8'
+least frequent across morning segments:           '6C444841-FE64-4375-BC3F-FA410CDC0AC7' (when tied, the first occurrence is chosen)
+
+most frequent across dataset:                     '499A1EAF-DDF1-4657-986C-EA5032104448' (only taking into account "morning" segments)
+least frequent across dataset:                    '4DC7A22D-9F1F-4DEF-8576-086910AABCB5' (when tied, the first occurrence is chosen)
+

+
+
Bluetooth features for OTHER devices and morning segments

For brevity we only show the following features for morning segments: +

OTHER: 
+    DEVICES: ["countscans", "uniquedevices", "meanscans", "stdscans"]
+    SCANS_MOST_FREQUENT_DEVICE: ["withinsegments", "acrosssegments", "acrossdataset"]
+

+

Note that countscansmostfrequentdeviceacrossdatasetothers is all 0s because 499A1EAF-DDF1-4657-986C-EA5032104448 is excluded from the count as it is labelled as an own device (not other). +

local_segment       countscansothers    uniquedevicesothers meanscansothers stdscansothers  countscansmostfrequentdevicewithinsegmentsothers    countscansmostfrequentdeviceacrosssegmentsothers    countscansmostfrequentdeviceacrossdatasetothers
+2016-11-29-morning  1                   1                   1.000000        NaN             1                                                   0.0                                                 0.0
+2016-11-30-morning  4                   3                   1.333333        0.57735         2                                                   2.0                                                 2.0
+2017-05-07-morning  5                   5                   1.000000        0.00000         1                                                   1.0                                                 1.0
+

+
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-calls/index.html b/1.3/features/phone-calls/index.html new file mode 100644 index 00000000..054c1001 --- /dev/null +++ b/1.3/features/phone-calls/index.html @@ -0,0 +1,2100 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Calls - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Calls

+

Sensor parameters description for [PHONE_CALLS]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the calls data is stored
+

RAPIDS Provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_calls_raw.csv
+- data/raw/{pid}/phone_calls_with_datetime.csv
+- data/interim/{pid}/phone_calls_features/phone_calls_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_calls.csv
+
+
+

Parameters description for [PHONE_CALLS][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + +
Key                       Description
[COMPUTE]Set to True to extract PHONE_CALLS features from the RAPIDS provider
[CALL_TYPES]The particular call_type that will be analyzed. The options for this parameter are incoming, outgoing or missed.
[FEATURES]Features to be computed for outgoing, incoming, and missed calls. Note that the same features are available for both incoming and outgoing calls, while missed calls has its own set of features. See the tables below.
+

Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] incoming and outgoing calls:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countcallsNumber of calls of a particular call_type occurred during a particular time_segment.
distinctcontactscontactsNumber of distinct contacts that are associated with a particular call_type for a particular time_segment
meandurationsecondsThe mean duration of all calls of a particular call_type during a particular time_segment.
sumdurationsecondsThe sum of the duration of all calls of a particular call_type during a particular time_segment.
mindurationsecondsThe duration of the shortest call of a particular call_type during a particular time_segment.
maxdurationsecondsThe duration of the longest call of a particular call_type during a particular time_segment.
stddurationsecondsThe standard deviation of the duration of all the calls of a particular call_type during a particular time_segment.
modedurationsecondsThe mode of the duration of all the calls of a particular call_type during a particular time_segment.
entropydurationnatsThe estimate of the Shannon entropy for the duration of all the calls of a particular call_type during a particular time_segment.
timefirstcallminutesThe time in minutes between 12:00am (midnight) and the first call of call_type.
timelastcallminutesThe time in minutes between 12:00am (midnight) and the last call of call_type.
countmostfrequentcontactcallsThe number of calls of a particular call_type during a particular time_segment of the most frequent contact throughout the monitored period.
+

Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] missed calls:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countcallsNumber of missed calls that occurred during a particular time_segment.
distinctcontactscontactsNumber of distinct contacts that are associated with missed calls for a particular time_segment
timefirstcallminutesThe time in minutes from 12:00am (Midnight) that the first missed call occurred.
timelastcallminutesThe time in minutes from 12:00am (Midnight) that the last missed call occurred.
countmostfrequentcontactcallsThe number of missed calls during a particular time_segment of the most frequent contact throughout the monitored period.
+
+

Assumptions/Observations

+
    +
  1. Traces for iOS calls are unique even for the same contact calling a participant more than once which renders countmostfrequentcontact meaningless and distinctcontacts equal to the total number of traces.
  2. +
  3. [CALL_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [CALL_TYPES] outgoing matches the [FEATURES] key outgoing
  4. +
  5. iOS calls data is transformed to match Android calls data format. See our algorithm
  6. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-conversation/index.html b/1.3/features/phone-conversation/index.html new file mode 100644 index 00000000..9cc81be5 --- /dev/null +++ b/1.3/features/phone-conversation/index.html @@ -0,0 +1,2154 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Conversation - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Conversation

+

Sensor parameters description for [PHONE_CONVERSATION]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[CONTAINER][ANDROID]Data stream container (database table, CSV file, etc.) where the conversation data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS)
[CONTAINER][IOS]Data stream container (database table, CSV file, etc.) where the conversation data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS)
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android only
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_conversation_raw.csv
+- data/raw/{pid}/phone_conversation_with_datetime.csv
+- data/interim/{pid}/phone_conversation_features/phone_conversation_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_conversation.csv
+
+
+

Parameters description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_CONVERSATION features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
[RECORDING_MINUTES]Minutes the plugin was recording audio (default 1 min)
[PAUSED_MINUTES]Minutes the plugin was NOT recording audio (default 3 min)
+

Features description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
minutessilenceminutesMinutes labeled as silence
minutesnoiseminutesMinutes labeled as noise
minutesvoiceminutesMinutes labeled as voice
minutesunknownminutesMinutes labeled as unknown
sumconversationdurationminutesTotal duration of all conversations
maxconversationdurationminutesLongest duration of all conversations
minconversationdurationminutesShortest duration of all conversations
avgconversationdurationminutesAverage duration of all conversations
sdconversationdurationminutesStandard Deviation of the duration of all conversations
timefirstconversationminutesMinutes since midnight when the first conversation for a time segment was detected
timelastconversationminutesMinutes since midnight when the last conversation for a time segment was detected
noisesumenergyL2-normSum of all energy values when inference is noise
noiseavgenergyL2-normAverage of all energy values when inference is noise
noisesdenergyL2-normStandard Deviation of all energy values when inference is noise
noiseminenergyL2-normMinimum of all energy values when inference is noise
noisemaxenergyL2-normMaximum of all energy values when inference is noise
voicesumenergyL2-normSum of all energy values when inference is voice
voiceavgenergyL2-normAverage of all energy values when inference is voice
voicesdenergyL2-normStandard Deviation of all energy values when inference is voice
voiceminenergyL2-normMinimum of all energy values when inference is voice
voicemaxenergyL2-normMaximum of all energy values when inference is voice
silencesensedfraction-Ratio between minutessilence and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown)
noisesensedfraction-Ratio between minutesnoise and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown)
voicesensedfraction-Ratio between minutesvoice and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown)
unknownsensedfraction-Ratio between minutesunknown and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown)
silenceexpectedfraction-Ratio between minutessilence and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes)
noiseexpectedfraction-Ratio between minutesnoise and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes)
voiceexpectedfraction-Ratio between minutesvoice and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes)
unknownexpectedfraction-Ratio between minutesunknown and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes)
+
+

Assumptions/Observations

+
    +
  1. The timestamp of conversation rows in iOS is in seconds so we convert it to milliseconds to match Android’s format
  2. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-data-yield/index.html b/1.3/features/phone-data-yield/index.html new file mode 100644 index 00000000..10d4850c --- /dev/null +++ b/1.3/features/phone-data-yield/index.html @@ -0,0 +1,2052 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Data Yield - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Data Yield

+

This is a combinatorial sensor which means that we use the data from multiple sensors to extract data yield features. Data yield features can be used to remove rows (time segments) that do not contain enough data. You should decide what your “enough” threshold is depending on the type of sensors you collected (frequency vs event based, e.g. accelerometer vs calls), the length of your study, and the rates of missing data that your analysis could handle.

+
+

Why is data yield important?

+

Imagine that you want to extract PHONE_CALL features on daily segments (00:00 to 23:59). Let’s say that on day 1 the phone logged 10 calls and 23 hours of data from other sensors and on day 2 the phone logged 10 calls and only 2 hours of data from other sensors. It’s more likely that other calls were placed on the 22 hours of data that you didn’t log on day 2 than on the 1 hour of data you didn’t log on day 1, and so including day 2 in your analysis could bias your results.

+
+

Sensor parameters description for [PHONE_DATA_YIELD]:

+ + + + + + + + + + + + + +
Key                   Description
[SENSORS]One or more phone sensor config keys (e.g. PHONE_MESSAGES). The more keys you include the more accurately RAPIDS can approximate the time a smartphone was sensing data. The supported phone sensors you can include in this list are outlined below (do NOT include Fitbit sensors, ONLY include phone sensors).
+
+

Supported phone sensors for [PHONE_DATA_YIELD][SENSORS]

+
PHONE_ACCELEROMETER
+PHONE_ACTIVITY_RECOGNITION
+PHONE_APPLICATIONS_CRASHES
+PHONE_APPLICATIONS_FOREGROUND
+PHONE_APPLICATIONS_NOTIFICATIONS
+PHONE_BATTERY
+PHONE_BLUETOOTH
+PHONE_CALLS
+PHONE_CONVERSATION
+PHONE_KEYBOARD
+PHONE_LIGHT
+PHONE_LOCATIONS
+PHONE_LOG
+PHONE_MESSAGES
+PHONE_SCREEN
+PHONE_WIFI_CONNECTED
+PHONE_WIFI_VISIBLE
+
+
+

RAPIDS provider

+

Before explaining the data yield features, let’s define the following relevant concepts:

+
    +
  • A valid minute is any 60 second window when any phone sensor logged at least 1 row of data
  • +
  • A valid hour is any 60 minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS]
  • +
+

The timestamps of all sensors are concatenated and then grouped per time segment. Minute and hour windows are created from the beginning of each time segment instance and these windows are marked as valid based on the definitions above. The duration of each time segment is taken into account to compute the features described below.

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/{sensor}_raw.csv # one for every [PHONE_DATA_YIELD][SENSORS]
+- data/interim/{pid}/phone_yielded_timestamps.csv
+- data/interim/{pid}/phone_yielded_timestamps_with_datetime.csv
+- data/interim/{pid}/phone_data_yield_features/phone_data_yield_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_data_yield.csv
+
+
+

Parameters description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_DATA_YIELD features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
[MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS]The proportion [0.0 ,1.0] of valid minutes in a 60-minute window necessary to flag that window as valid.
+

Features description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
ratiovalidyieldedminutes-The ratio between the number of valid minutes and the duration in minutes of a time segment.
ratiovalidyieldedhours-The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour this feature will always be 1.
+
+

Assumptions/Observations

+
    +
  1. +

    We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together.

    +

    For example, let’s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur:

    +

      +
    1. the 12 missing hours are from the beginning of the segment or
    2. +
    3. 30 minutes could be missing from every hour (24 * 30 minutes = 12 hours).
    4. +

    +

    ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b). In other words, sensed data for b is more evenly spread compared to a.

    +
  2. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-keyboard/index.html b/1.3/features/phone-keyboard/index.html new file mode 100644 index 00000000..1537a715 --- /dev/null +++ b/1.3/features/phone-keyboard/index.html @@ -0,0 +1,1973 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Keyboard - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Keyboard

+

Sensor parameters description for [PHONE_KEYBOARD]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the keyboard data is stored
+
+

File Sequence

+
- data/raw/{pid}/phone_keyboard_raw.csv
+- data/raw/{pid}/phone_keyboard_with_datetime.csv
+- data/interim/{pid}/phone_keyboard_features/phone_keyboard_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_keyboard.csv
+
+
+

Features description for [PHONE_KEYBOARD]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
sessioncount-Number of typing sessions in a time segment. A session begins with any keypress and finishes until 5 seconds have elapsed since the last key was pressed or the application that the user was typing on changes.
averagesessionlengthmillisecondsAverage length of all sessions in a time segment instance
averageinterkeydelaymillisecondsThe average time between keystrokes measured in milliseconds.
changeintextlengthlessthanminusoneNumber of times a keyboard typing or swiping event changed the length of the current text to less than one fewer character.
changeintextlengthequaltominusoneNumber of times a keyboard typing or swiping event changed the length of the current text in exactly one fewer character.
changeintextlengthequaltooneNumber of times a keyboard typing or swiping event changed the length of the current text in exactly one more character.
changeintextlengthmorethanoneNumber of times a keyboard typing or swiping event changed the length of the current text to more than one character.
maxtextlengthLength in characters of the longest sentence(s) contained in the typing text box of any app during the time segment.
lastmessagelengthLength of the last text in characters of the sentence(s) contained in the typing text box of any app during the time segment.
totalkeyboardtouchesAverage number of typing events across all sessions in a time segment instance.
+
+

Note

+

We did not find a reliable way to distinguish between AutoCorrect or AutoComplete changes, since both can be applied with a single touch or swipe event and can decrease or increase the length of the text by an arbitrary number of characters.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-light/index.html b/1.3/features/phone-light/index.html new file mode 100644 index 00000000..cff6e91a --- /dev/null +++ b/1.3/features/phone-light/index.html @@ -0,0 +1,2025 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Light - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Light

+

Sensor parameters description for [PHONE_LIGHT]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the light data is stored
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android only
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_light_raw.csv
+- data/raw/{pid}/phone_light_with_datetime.csv
+- data/interim/{pid}/phone_light_features/phone_light_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_light.csv
+
+
+

Parameters description for [PHONE_LIGHT][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_LIGHT features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
+

Features description for [PHONE_LIGHT][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countrowsNumber of light sensor rows recorded.
maxluxluxThe maximum ambient luminance.
minluxluxThe minimum ambient luminance.
avgluxluxThe average ambient luminance.
medianluxluxThe median ambient luminance.
stdluxluxThe standard deviation of ambient luminance.
+
+

Assumptions/Observations

+

NA

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-locations/index.html b/1.3/features/phone-locations/index.html new file mode 100644 index 00000000..e340551a --- /dev/null +++ b/1.3/features/phone-locations/index.html @@ -0,0 +1,2378 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Locations - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Locations

+

Sensor parameters description for [PHONE_LOCATIONS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Key                                                                                       Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the location data is stored
[LOCATIONS_TO_USE]Type of location data to use, one of ALL, GPS, ALL_RESAMPLED or FUSED_RESAMPLED. This filter is based on the provider column of the locations table, ALL includes every row, GPS only includes rows where the provider is gps, ALL_RESAMPLED includes all rows after being resampled, and FUSED_RESAMPLED only includes rows where the provider is fused after being resampled.
[FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD]if ALL_RESAMPLED or FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row is resampled to the next valid timestamp (see the Assumptions/Observations below) only if the time difference between them is less or equal than this threshold (in minutes).
[FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION]if ALL_RESAMPLED or FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row is resampled at most for this long (in minutes)
+
+

Assumptions/Observations

+

Types of location data to use +Android and iOS clients can collect location coordinates through the phone’s GPS, the network cellular towers around the phone, or Google’s fused location API.

+
    +
  • If you want to use only the GPS provider, set [LOCATIONS_TO_USE] to GPS
  • +
  • If you want to use all providers, set [LOCATIONS_TO_USE] to ALL
  • +
  • If you collected location data from different providers, including the fused API, use ALL_RESAMPLED
  • +
  • If your mobile client was configured to use fused location only or you want to focus only on this provider, set [LOCATIONS_TO_USE] to FUSED_RESAMPLED.
  • +
+

ALL_RESAMPLED and FUSED_RESAMPLED take the original location coordinates and replicate each pair forward in time as long as the phone was sensing data as indicated by the joined timestamps of [PHONE_DATA_YIELD][SENSORS]. This is done because Google’s API only logs a new location coordinate pair when it is sufficiently different in time or space from the previous one and because GPS and network providers can log data at variable rates.

+

There are two parameters associated with resampling fused location.

+
    +
  1. FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD (in minutes, default 30) controls the maximum gap between any two coordinate pairs to replicate the last known pair. For example, participant A’s phone did not collect data between 10:30 am and 10:50 am and between 11:05 am and 11:40 am, the last known coordinate pair is replicated during the first period but not the second. In other words, we assume that we can no longer guarantee the participant stayed at the last known location if the phone did not sense data for more than 30 minutes.
  2. +
  3. FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION (in minutes, default 720 or 12 hours) stops the last known fused location from being replicated longer than this threshold even if the phone was sensing data continuously. For example, participant A went home at 9 pm, and their phone was sensing data without gaps until 11 am the next morning, the last known location is replicated until 9 am.
  4. +
+

If you have suggestions to modify or improve this resampling, let us know.

+
+

BARNETT provider

+

These features are based on the original open-source implementation by Barnett et al and some features created by Canzian et al.

+
+

Available time segments and platforms

+
    +
  • Available only for segments that start at 00:00:00 and end at 23:59:59 of the same or a different day (daily, weekly, weekend, etc.)
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_locations_raw.csv
+- data/interim/{pid}/phone_locations_processed.csv
+- data/interim/{pid}/phone_locations_processed_with_datetime.csv
+- data/interim/{pid}/phone_locations_features/phone_locations_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_locations.csv
+
+
+

Parameters description for [PHONE_LOCATIONS][PROVIDERS][BARNETT]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                                           Description
[COMPUTE]Set to True to extract PHONE_LOCATIONS features from the BARNETT provider
[FEATURES]Features to be computed, see table below
[ACCURACY_LIMIT]An integer in meters, any location rows with an accuracy higher than this is dropped. This number means there’s a 68% probability the actual location is within this radius
[IF_MULTIPLE_TIMEZONES]Currently, USE_MOST_COMMON is the only value supported. If the location data for a participant belongs to multiple time zones, we select the most common because Barnett’s algorithm can only handle one time zone
[MINUTES_DATA_USED]Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough.
+

Features description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] adapted from Beiwe Summary Statistics:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
hometimeminutesTime at home. Time spent at home in minutes. Home is the most visited significant location between 8 pm and 8 am, including any pauses within a 200-meter radius.
disttravelledmetersTotal distance traveled over a day (flights).
rogmetersThe Radius of Gyration (rog) is a measure in meters of the area covered by a person over a day. A centroid is calculated for all the places (pauses) visited during a day, and a weighted distance between all the places and that centroid is computed. The weights are proportional to the time spent in each place.
maxdiammetersThe maximum diameter is the largest distance between any two pauses.
maxhomedistmetersThe maximum distance from home in meters.
siglocsvisitedlocationsThe number of significant locations visited during the day. Significant locations are computed using k-means clustering over pauses found in the whole monitoring period. The number of clusters is found iterating k from 1 to 200 stopping until the centroids of two significant locations are within 400 meters of one another.
avgflightlenmetersMean length of all flights.
stdflightlenmetersStandard deviation of the length of all flights.
avgflightdursecondsMean duration of all flights.
stdflightdursecondsThe standard deviation of the duration of all flights.
probpause-The fraction of a day spent in a pause (as opposed to a flight)
siglocentropynatsShannon’s entropy measurement is based on the proportion of time spent at each significant location visited during a day.
circdnrtn-A continuous metric quantifying a person’s circadian routine that can take any value between 0 and 1, where 0 represents a daily routine completely different from any other sensed days and 1 a routine the same as every other sensed day.
wkenddayrtn-Same as circdnrtn but computed separately for weekends and weekdays.
+
+

Assumptions/Observations

+

Multi day segment features +Barnett’s features are only available on time segments that span entire days (00:00:00 to 23:59:59). Such segments can be one-day long (daily) or multi-day (weekly, for example). Multi-day segment features are computed based on daily features summarized the following way:

+
    +
  • sum for hometime, disttravelled, siglocsvisited, and minutes_data_used
  • +
  • max for maxdiam, and maxhomedist
  • +
  • mean for rog, avgflightlen, stdflightlen, avgflightdur, stdflightdur, probpause, siglocentropy, circdnrtn, wkenddayrtn, and minsmissing
  • +
+

Computation speed +The process to extract these features can be slow compared to other sensors and providers due to the required simulation.

+

How are these features computed? +These features are based on a Pause-Flight model. A pause is defined as a mobility trace (location pings) within a certain duration and distance (by default, 300 seconds and 60 meters). A flight is any mobility trace between two pauses. Data is resampled and imputed before the features are computed. See Barnett et al for more information. In RAPIDS, we only expose one parameter for these features (accuracy limit). You can change other parameters in src/features/phone_locations/barnett/library/MobilityFeatures.R.

+

Significant Locations +Significant locations are determined using K-means clustering on pauses longer than 10 minutes. The number of clusters (K) is increased until no two clusters are within 400 meters from each other. After this, pauses within a certain range of a cluster (200 meters by default) count as a visit to that significant location. This description was adapted from the Supplementary Materials of Barnett et al.

+

The Circadian Calculation +For a detailed description of how this is calculated, see Canzian et al.

+
+

DORYAB provider

+

These features are based on the original implementation by Doryab et al..

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_locations_raw.csv
+- data/interim/{pid}/phone_locations_processed.csv
+- data/interim/{pid}/phone_locations_processed_with_datetime.csv
+- data/interim/{pid}/phone_locations_processed_with_datetime_with_doryab_columns.csv
+- data/interim/{pid}/phone_locations_features/phone_locations_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_locations.csv
+
+
+

Parameters description for [PHONE_LOCATIONS][PROVIDERS][DORYAB]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                                         Description
[COMPUTE]Set to True to extract PHONE_LOCATIONS features from the DORYAB provider
[FEATURES]Features to be computed, see table below
[ACCURACY_LIMIT]An integer in meters, any location rows with an accuracy higher than this will be dropped. This number means there’s a 68% probability the true location is within this radius
[DBSCAN_EPS]The maximum distance in meters between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function.
[DBSCAN_MINSAMPLES]The number of samples (or total weight) in a neighborhood for a point to be considered as a core point of a cluster. This includes the point itself.
[THRESHOLD_STATIC]It is the threshold value in km/hr which labels a row as Static or Moving.
[MAXIMUM_ROW_GAP]The maximum gap (in seconds) allowed between any two consecutive rows for them to be considered part of the same displacement. If this threshold is too high, it can throw speed and distance calculations off for periods when the phone was not sensing. This value must be larger than your GPS sampling interval when [LOCATIONS_TO_USE] is ALL or GPS, otherwise all the stationary-related features will be NA. If [LOCATIONS_TO_USE] is ALL_RESAMPLED or FUSED_RESAMPLED, you can use the default value as every row will be resampled at 1-minute intervals.
[MINUTES_DATA_USED]Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough.
[CLUSTER_ON]Set this flag to PARTICIPANT_DATASET to create clusters based on the entire participant’s dataset or to TIME_SEGMENT to create clusters based on all the instances of the corresponding time segment (e.g. all mornings) or to TIME_SEGMENT_INSTANCE to create clusters based on a single instance (e.g. 2020-05-20’s morning).
[INFER_HOME_LOCATION_STRATEGY]The strategy applied to infer home locations. Set to DORYAB_STRATEGY to infer one home location for the entire dataset of each participant or to SUN_LI_VEGA_STRATEGY to infer one home location per day per participant. See Observations below to know more.
[MINIMUM_DAYS_TO_DETECT_HOME_CHANGES]The minimum number of consecutive days a new home location candidate has to repeat before it is considered the participant’s new home. This parameter will be used only when [INFER_HOME_LOCATION_STRATEGY] is set to SUN_LI_VEGA_STRATEGY.
[CLUSTERING_ALGORITHM]The original Doryab et al. implementation uses DBSCAN, OPTICS is also available with similar (but not identical) clustering results and lower memory consumption.
[RADIUS_FOR_HOME]All location coordinates within this distance (meters) from the home location coordinates are considered a homestay (see timeathome feature).
+

Features description for [PHONE_LOCATIONS][PROVIDERS][DORYAB]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
locationvariance\(meters^2\)The sum of the variances of the latitude and longitude columns.
loglocationvariance-Log of the sum of the variances of the latitude and longitude columns.
totaldistancemetersTotal distance traveled in a time segment using the haversine formula.
avgspeedkm/hrAverage speed in a time segment considering only the instances labeled as Moving.
varspeedkm/hrSpeed variance in a time segment considering only the instances labeled as Moving.
circadianmovement-Deprecated, see Observations below. “It encodes the extent to which a person’s location patterns follow a 24-hour circadian cycle." Doryab et al..
numberofsignificantplacesplacesNumber of significant locations visited. It is calculated using the DBSCAN/OPTICS clustering algorithm which takes in EPS and MIN_SAMPLES as parameters to identify clusters. Each cluster is a significant place.
numberlocationtransitionstransitionsNumber of movements between any two clusters in a time segment.
radiusgyrationmetersQuantifies the area covered by a participant
timeattop1locationminutesTime spent at the most significant location.
timeattop2locationminutesTime spent at the 2nd most significant location.
timeattop3locationminutesTime spent at the 3rd most significant location.
movingtostaticratio-Ratio between stationary time and total location sensed time. A lat/long coordinate pair is labeled as stationary if its speed (distance/time) to the next coordinate pair is less than 1km/hr. A higher value represents a more stationary routine.
outlierstimepercent-Ratio between the time spent in non-significant clusters divided by the time spent in all clusters (stationary time. Only stationary samples are clustered). A higher value represents more time spent in non-significant clusters.
maxlengthstayatclustersminutesMaximum time spent in a cluster (significant location).
minlengthstayatclustersminutesMinimum time spent in a cluster (significant location).
avglengthstayatclustersminutesAverage time spent in a cluster (significant location).
stdlengthstayatclustersminutesStandard deviation of time spent in a cluster (significant location).
locationentropynatsShannon Entropy computed over the row count of each cluster (significant location), it is higher the more rows belong to a cluster (i.e., the more time a participant spent at a significant location).
normalizedlocationentropynatsShannon Entropy computed over the row count of each cluster (significant location) divided by the number of clusters; it is higher the more rows belong to a cluster (i.e., the more time a participant spent at a significant location).
timeathomeminutesTime spent at home (see Observations below for a description on how we compute home).
homelabel-An integer that represents a different home location. It will be a constant number (1) for all participants when [INFER_HOME_LOCATION_STRATEGY] is set to DORYAB_STRATEGY or an incremental index if the strategy is set to SUN_LI_VEGA_STRATEGY.
+
+

Assumptions/Observations

+

Significant Locations Identified +Significant locations are determined using DBSCAN clustering on locations that a patient visits over the course of the period of data collection.

+

Circadian Movement Calculation +Note Feb 3 2021. It seems the implementation of this feature is not correct; we suggest not to use this feature until a fix is in place. For a detailed description of how this should be calculated, see Saeb et al.

+

Fine-Tuning Clustering Parameters +Based on an experiment where we collected fused location data for 7 days with a mean accuracy of 86 and an SD of 350.874635, we determined that EPS/MAX_EPS=100 produced closer clustering results to reality. Higher values (>100) missed out on some significant places, like a short grocery visit, while lower values (<100) picked up traffic lights and stop signs while driving as significant locations. We recommend you set EPS based on your location data’s accuracy (the more accurate your data is, the lower you should be able to set EPS).

+

Duration Calculation +To calculate the time duration component for our features, we compute the difference between consecutive rows’ timestamps to take into account sampling rate variability. If this time difference is larger than a threshold (300 seconds by default), we replace it with NA and label that row as Moving.

+

Home location

+
    +
  • +

    DORYAB_STRATEGY: home is calculated using all location data of a participant between 12 am and 6 am, then applying a clustering algorithm (DBSCAN or OPTICS) and considering the center of the biggest cluster home for that participant.

    +
  • +
  • +

    SUN_LI_VEGA_STRATEGY: home is calculated using all location data of a participant between 12 am and 6 am, then applying a clustering algorithm (DBSCAN or OPTICS). The following steps are used to infer the home location per day for that participant:

    +
      +
    1. +

      if there are records within [03:30:00, 04:30:00] for that night:
      +     we choose the most common cluster during that period as a home candidate for that day.
      + elif there are records within [midnight, 03:30:00) for that night:
      +     we choose the last valid cluster during that period as a home candidate for that day.
      + elif there are records within (04:30:00, 06:00:00] for that night:
      +     we choose the first valid cluster during that period as a home candidate for that day.
      + else:
      +     the home location is NA (missing) for that day.

      +
    2. +
    3. +

      If the count of consecutive days with the same candidate home location cluster label is larger or equal to [MINIMUM_DAYS_TO_DETECT_HOME_CHANGES], + the candidate will be regarded as the home cluster; otherwise, the home cluster will be the last valid day’s cluster. + If there are no valid clusters before that day, the first home location in the days after is used.

      +
    4. +
    +
  • +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-log/index.html b/1.3/features/phone-log/index.html new file mode 100644 index 00000000..ade6b0c7 --- /dev/null +++ b/1.3/features/phone-log/index.html @@ -0,0 +1,1907 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Log - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Log

+

Sensor parameters description for [PHONE_LOG]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[CONTAINER][ANDROID]Data stream container (database table, CSV file, etc.) where a data log is stored for Android devices
[CONTAINER][IOS]Data stream container (database table, CSV file, etc.) where a data log is stored for iOS devices
+
+

Note

+

No feature providers have been implemented for this sensor yet, however you can use its key (PHONE_LOG) to improve PHONE_DATA_YIELD or you can implement your own features.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-messages/index.html b/1.3/features/phone-messages/index.html new file mode 100644 index 00000000..003fba33 --- /dev/null +++ b/1.3/features/phone-messages/index.html @@ -0,0 +1,2026 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Messages - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Messages

+

Sensor parameters description for [PHONE_MESSAGES]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the messages data is stored
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android only
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_messages_raw.csv
+- data/raw/{pid}/phone_messages_with_datetime.csv
+- data/interim/{pid}/phone_messages_features/phone_messages_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_messages.csv
+
+
+

Parameters description for [PHONE_MESSAGES][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_MESSAGES features from the RAPIDS provider
[MESSAGES_TYPES]The messages_type that will be analyzed. The options for this parameter are received or sent.
[FEATURES]Features to be computed, see table below for [MESSAGES_TYPES] received and sent
+

Features description for [PHONE_MESSAGES][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countmessagesNumber of messages of type messages_type that occurred during a particular time_segment.
distinctcontactscontactsNumber of distinct contacts that are associated with a particular messages_type during a particular time_segment.
timefirstmessagesminutesNumber of minutes between 12:00am (midnight) and the first message of a particular messages_type during a particular time_segment.
timelastmessagesminutesNumber of minutes between 12:00am (midnight) and the last message of a particular messages_type during a particular time_segment.
countmostfrequentcontactmessagesNumber of messages from the contact with the most messages of messages_type during a time_segment throughout the whole dataset of each participant.
+
+

Assumptions/Observations

+
    +
  1. [MESSAGES_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [MESSAGES_TYPES] sent matches the [FEATURES] key sent
  2. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-screen/index.html b/1.3/features/phone-screen/index.html new file mode 100644 index 00000000..409e92b7 --- /dev/null +++ b/1.3/features/phone-screen/index.html @@ -0,0 +1,2061 @@ + + + + + + + + + + + + + + + + + + + + + + Phone Screen - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone Screen

+

Sensor parameters description for [PHONE_SCREEN]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the screen data is stored
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_screen_raw.csv
+- data/raw/{pid}/phone_screen_with_datetime.csv
+- data/interim/{pid}/phone_screen_episodes.csv
+- data/interim/{pid}/phone_screen_episodes_resampled.csv
+- data/interim/{pid}/phone_screen_episodes_resampled_with_datetime.csv
+- data/interim/{pid}/phone_screen_features/phone_screen_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_screen.csv
+
+
+

Parameters description for [PHONE_SCREEN][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                                                          Description
[COMPUTE]Set to True to extract PHONE_SCREEN features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
[REFERENCE_HOUR_FIRST_USE]The reference point from which firstuseafter is to be computed, default is midnight
[IGNORE_EPISODES_SHORTER_THAN]Ignore episodes that are shorter than this threshold (minutes). Set to 0 to disable this filter.
[IGNORE_EPISODES_LONGER_THAN]Ignore episodes that are longer than this threshold (minutes). Set to 0 to disable this filter.
[EPISODE_TYPES]Currently we only support unlock episodes (from when the phone is unlocked until the screen is off)
+

Features description for [PHONE_SCREEN][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
sumdurationminutesTotal duration of all unlock episodes.
maxdurationminutesLongest duration of any unlock episode.
mindurationminutesShortest duration of any unlock episode.
avgdurationminutesAverage duration of all unlock episodes.
stddurationminutesStandard deviation duration of all unlock episodes.
countepisodeepisodesNumber of all unlock episodes
firstuseafterminutesMinutes until the first unlock episode.
+ + +
+

Assumptions/Observations

+
    +
  1. +

    In Android, lock events can happen right after an off event, after a few seconds of an off event, or never happen depending on the phone's settings, therefore, an unlock episode is defined as the time between an unlock and an off event. In iOS, on and off events do not exist, so an unlock episode is defined as the time between an unlock and a lock event.

    +
  2. +
  3. +

    Events in iOS are recorded reliably albeit some duplicated lock events within milliseconds from each other, so we only keep consecutive unlock/lock pairs. In Android you can find multiple consecutive unlock or lock events, so we only keep consecutive unlock/off pairs. In our experiments these cases are less than 10% of the screen events collected and this happens because ACTION_SCREEN_OFF and ACTION_SCREEN_ON are sent when the device becomes non-interactive which may have nothing to do with the screen turning off. In addition to unlock/off episodes, in Android it is possible to measure the time spent on the lock screen before an unlock event as well as the total screen time (i.e. ON to OFF) but these are not implemented at the moment.

    +
  4. +
  5. +

    We transform iOS screen events to match Android’s format, we replace lock episodes with off episodes (2 with 0) in iOS. However, as mentioned above this is still computing unlock to lock episodes.

    +
  6. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-wifi-connected/index.html b/1.3/features/phone-wifi-connected/index.html new file mode 100644 index 00000000..c8984904 --- /dev/null +++ b/1.3/features/phone-wifi-connected/index.html @@ -0,0 +1,2013 @@ + + + + + + + + + + + + + + + + + + + + + + Phone WiFI Connected - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone WiFi Connected

+

Sensor parameters description for [PHONE_WIFI_CONNECTED]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the wifi (connected) data is stored
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android and iOS
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_wifi_connected_raw.csv
+- data/raw/{pid}/phone_wifi_connected_with_datetime.csv
+- data/interim/{pid}/phone_wifi_connected_features/phone_wifi_connected_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_wifi_connected.csv
+
+
+

Parameters description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_WIFI_CONNECTED features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
+

Features description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countscansdevicesNumber of scanned WiFi access points connected during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately
uniquedevicesdevicesNumber of unique access points during a time_segment as identified by their hardware address
countscansmostuniquedevicescansNumber of scans of the most scanned access point during a time_segment across the whole monitoring period
+
+

Assumptions/Observations

+
    +
  1. A connected WiFi access point is one that a phone was connected to.
  2. +
  3. By default AWARE stores this data in the sensor_wifi table.
  4. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/features/phone-wifi-visible/index.html b/1.3/features/phone-wifi-visible/index.html new file mode 100644 index 00000000..fd70d6a8 --- /dev/null +++ b/1.3/features/phone-wifi-visible/index.html @@ -0,0 +1,2013 @@ + + + + + + + + + + + + + + + + + + + + + + Phone WiFI Visible - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Phone WiFi Visible

+

Sensor parameters description for [PHONE_WIFI_VISIBLE]:

+ + + + + + + + + + + + + +
Key                             Description
[CONTAINER]Data stream container (database table, CSV file, etc.) where the wifi (visible) data is stored
+

RAPIDS provider

+
+

Available time segments and platforms

+
    +
  • Available for all time segments
  • +
  • Available for Android only
  • +
+
+
+

File Sequence

+
- data/raw/{pid}/phone_wifi_visible_raw.csv
+- data/raw/{pid}/phone_wifi_visible_with_datetime.csv
+- data/interim/{pid}/phone_wifi_visible_features/phone_wifi_visible_{language}_{provider_key}.csv
+- data/processed/features/{pid}/phone_wifi_visible.csv
+
+
+

Parameters description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + +
Key                             Description
[COMPUTE]Set to True to extract PHONE_WIFI_VISIBLE features from the RAPIDS provider
[FEATURES]Features to be computed, see table below
+

Features description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS]:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUnitsDescription
countscansdevicesNumber of scanned WiFi access points visible during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately
uniquedevicesdevicesNumber of unique access points during a time_segment as identified by their hardware address
countscansmostuniquedevicescansNumber of scans of the most scanned access point during a time_segment across the whole monitoring period
+
+

Assumptions/Observations

+
    +
  1. A visible WiFi access point is one that a phone sensed around itself but that it was not connected to. Due to API restrictions, this sensor is not available on iOS.
  2. +
  3. By default AWARE stores this data in the wifi table.
  4. +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/img/analysis_workflow.png b/1.3/img/analysis_workflow.png new file mode 100644 index 00000000..89aab053 Binary files /dev/null and b/1.3/img/analysis_workflow.png differ diff --git a/1.3/img/calls.csv b/1.3/img/calls.csv new file mode 100644 index 00000000..530f275a --- /dev/null +++ b/1.3/img/calls.csv @@ -0,0 +1,9 @@ +"_id","timestamp","device_id","call_type","call_duration","trace" +1,1587663260695,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",2,14,"d5e84f8af01b2728021d4f43f53a163c0c90000c" +2,1587739118007,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",3,0,"47c125dc7bd163b8612cdea13724a814917b6e93" +5,1587746544891,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",2,95,"9cc793ffd6e88b1d850ce540b5d7e000ef5650d4" +6,1587911379859,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",2,63,"51fb9344e988049a3fec774c7ca622358bf80264" +7,1587992647361,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",3,0,"2a862a7730cfdfaf103a9487afe3e02935fd6e02" +8,1588020039448,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",1,11,"a2c53f6a086d98622c06107780980cf1bb4e37bd" +11,1588176189024,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",2,65,"56589df8c830c70e330b644921ed38e08d8fd1f3" +12,1588197745079,"a748ee1a-1d0b-4ae9-9074-279a2b6ba524",3,0,"cab458018a8ed3b626515e794c70b6f415318adc" diff --git a/1.3/img/dataflow.png b/1.3/img/dataflow.png new file mode 100644 index 00000000..241162d2 Binary files /dev/null and b/1.3/img/dataflow.png differ diff --git a/1.3/img/features_fitbit_sleep_intraday.png b/1.3/img/features_fitbit_sleep_intraday.png new file mode 100644 index 00000000..7af0ac82 Binary files /dev/null and b/1.3/img/features_fitbit_sleep_intraday.png differ diff --git a/1.3/img/files.png b/1.3/img/files.png new file mode 100644 index 00000000..495a059c Binary files /dev/null and b/1.3/img/files.png differ diff --git a/1.3/img/h-data-yield.html b/1.3/img/h-data-yield.html new file mode 100644 index 00000000..7952a329 --- /dev/null +++ b/1.3/img/h-data-yield.html @@ -0,0 +1,3 @@ 
+
+
+
\ No newline at end of file diff --git a/1.3/img/h-data-yield.png b/1.3/img/h-data-yield.png new file mode 100644 index 00000000..859e7cf9 Binary files /dev/null and b/1.3/img/h-data-yield.png differ diff --git a/1.3/img/hm-data-yield-participants-absolute-time.html b/1.3/img/hm-data-yield-participants-absolute-time.html new file mode 100644 index 00000000..e902c07c --- /dev/null +++ b/1.3/img/hm-data-yield-participants-absolute-time.html @@ -0,0 +1,11 @@ +
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file diff --git a/1.3/img/hm-data-yield-participants-absolute-time.png b/1.3/img/hm-data-yield-participants-absolute-time.png new file mode 100644 index 00000000..4129a8ee Binary files /dev/null and b/1.3/img/hm-data-yield-participants-absolute-time.png differ diff --git a/1.3/img/hm-data-yield-participants-relative-time.html b/1.3/img/hm-data-yield-participants-relative-time.html new file mode 100644 index 00000000..7c7366f3 --- /dev/null +++ b/1.3/img/hm-data-yield-participants-relative-time.html @@ -0,0 +1,11 @@ +
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file diff --git a/1.3/img/hm-data-yield-participants-relative-time.png b/1.3/img/hm-data-yield-participants-relative-time.png new file mode 100644 index 00000000..20f8caa3 Binary files /dev/null and b/1.3/img/hm-data-yield-participants-relative-time.png differ diff --git a/1.3/img/hm-data-yield-participants.html b/1.3/img/hm-data-yield-participants.html new file mode 100644 index 00000000..bbb00c75 --- /dev/null +++ b/1.3/img/hm-data-yield-participants.html @@ -0,0 +1,191 @@ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
\ No newline at end of file diff --git a/1.3/img/hm-feature-correlations.html b/1.3/img/hm-feature-correlations.html new file mode 100644 index 00000000..99e596fb --- /dev/null +++ b/1.3/img/hm-feature-correlations.html @@ -0,0 +1,96 @@ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
+ + + +
+ +
\ No newline at end of file diff --git a/1.3/img/hm-feature-correlations.png b/1.3/img/hm-feature-correlations.png new file mode 100644 index 00000000..f6e60e46 Binary files /dev/null and b/1.3/img/hm-feature-correlations.png differ diff --git a/1.3/img/hm-phone-sensors.html b/1.3/img/hm-phone-sensors.html new file mode 100644 index 00000000..d47455d4 --- /dev/null +++ b/1.3/img/hm-phone-sensors.html @@ -0,0 +1,447 @@ + + + + + + + + + + + + + + +Sensors per Minute per Time Segment for All Participants + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+
+
+
+
+
+
+
+ + + + +
+ + + + + + + + + + + + + + + diff --git a/1.3/img/hm-phone-sensors.png b/1.3/img/hm-phone-sensors.png new file mode 100644 index 00000000..3e19e4fe Binary files /dev/null and b/1.3/img/hm-phone-sensors.png differ diff --git a/1.3/img/hm-sensor-rows.html b/1.3/img/hm-sensor-rows.html new file mode 100644 index 00000000..5e90eaf4 --- /dev/null +++ b/1.3/img/hm-sensor-rows.html @@ -0,0 +1,447 @@ + + + + + + + + + + + + + + +Sensor Row Count per Time Segment For All Participants + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+
+
+
+
+
+
+
+
+ + + + +
+ + + + + + + + + + + + + + + diff --git a/1.3/img/hm-sensor-rows.png b/1.3/img/hm-sensor-rows.png new file mode 100644 index 00000000..c8c6cbcd Binary files /dev/null and b/1.3/img/hm-sensor-rows.png differ diff --git a/1.3/img/logo.png b/1.3/img/logo.png new file mode 100644 index 00000000..73e87d51 Binary files /dev/null and b/1.3/img/logo.png differ diff --git a/1.3/img/logos/cmu.png b/1.3/img/logos/cmu.png new file mode 100644 index 00000000..4ee76401 Binary files /dev/null and b/1.3/img/logos/cmu.png differ diff --git a/1.3/img/logos/dbdp.png b/1.3/img/logos/dbdp.png new file mode 100644 index 00000000..facf5fca Binary files /dev/null and b/1.3/img/logos/dbdp.png differ diff --git a/1.3/img/logos/helsinki.jpg b/1.3/img/logos/helsinki.jpg new file mode 100644 index 00000000..2e03ebe8 Binary files /dev/null and b/1.3/img/logos/helsinki.jpg differ diff --git a/1.3/img/logos/manchester.png b/1.3/img/logos/manchester.png new file mode 100644 index 00000000..5600585e Binary files /dev/null and b/1.3/img/logos/manchester.png differ diff --git a/1.3/img/logos/monash.jpg b/1.3/img/logos/monash.jpg new file mode 100644 index 00000000..70a68b8b Binary files /dev/null and b/1.3/img/logos/monash.jpg differ diff --git a/1.3/img/logos/oulu.png b/1.3/img/logos/oulu.png new file mode 100644 index 00000000..fc534f8d Binary files /dev/null and b/1.3/img/logos/oulu.png differ diff --git a/1.3/img/logos/penn.png b/1.3/img/logos/penn.png new file mode 100644 index 00000000..d4811dd5 Binary files /dev/null and b/1.3/img/logos/penn.png differ diff --git a/1.3/img/logos/pitt.png b/1.3/img/logos/pitt.png new file mode 100644 index 00000000..daf30418 Binary files /dev/null and b/1.3/img/logos/pitt.png differ diff --git a/1.3/img/logos/uw.jpg b/1.3/img/logos/uw.jpg new file mode 100644 index 00000000..89f00b03 Binary files /dev/null and b/1.3/img/logos/uw.jpg differ diff --git a/1.3/img/logos/virginia.jpg b/1.3/img/logos/virginia.jpg new file mode 100644 index 00000000..4baa8893 Binary 
files /dev/null and b/1.3/img/logos/virginia.jpg differ diff --git a/1.3/img/sleep_intraday_price.png b/1.3/img/sleep_intraday_price.png new file mode 100644 index 00000000..5b715e41 Binary files /dev/null and b/1.3/img/sleep_intraday_price.png differ diff --git a/1.3/img/sleep_intraday_rapids.png b/1.3/img/sleep_intraday_rapids.png new file mode 100644 index 00000000..9a6d1550 Binary files /dev/null and b/1.3/img/sleep_intraday_rapids.png differ diff --git a/1.3/img/sleep_summary_rapids.png b/1.3/img/sleep_summary_rapids.png new file mode 100644 index 00000000..ab0f4a92 Binary files /dev/null and b/1.3/img/sleep_summary_rapids.png differ diff --git a/1.3/img/testing_eventsegments_mtz.png b/1.3/img/testing_eventsegments_mtz.png new file mode 100644 index 00000000..832a3d1d Binary files /dev/null and b/1.3/img/testing_eventsegments_mtz.png differ diff --git a/1.3/index.html b/1.3/index.html new file mode 100644 index 00000000..50c3627f --- /dev/null +++ b/1.3/index.html @@ -0,0 +1,1952 @@ + + + + + + + + + + + + + + + + + + + + + + RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Welcome to RAPIDS documentation

+

Reproducible Analysis Pipeline for Data Streams (RAPIDS) allows you to process smartphone and wearable data to extract and create behavioral features (a.k.a. digital biomarkers), visualize mobile sensor data, and structure your analysis into reproducible workflows.

+

RAPIDS is open source, documented, multi-platform, modular, tested, and reproducible. At the moment, we support data streams logged by smartphones, Fitbit wearables, and Empatica wearables in collaboration with the DBDP.

+
+

Where do I start?

+

New to RAPIDS? Check our Overview + FAQ and minimal example

+

Install, configure, and execute RAPIDS to extract and plot behavioral features

+

Bugs should be reported on Github issues

+

Questions, discussions, feature requests, and feedback can be posted on our Github discussions

+

Keep up to date with our Twitter feed or Slack channel

+

Do you want to modify or add new functionality to RAPIDS? Check our contributing guide

+

Are you upgrading from RAPIDS 0.4.x or older? Follow this guide

+
+

What are the benefits of using RAPIDS?

+
    +
  1. Consistent analysis. Every participant sensor dataset is analyzed in the same way and isolated from each other.
  2. +
  3. Efficient analysis. Every analysis step is executed only once. Whenever your data or configuration changes, only the affected files are updated.
  4. +
  5. Parallel execution. Thanks to Snakemake, your analysis can be executed over multiple cores without changing your code.
  6. +
  7. Code-free features. Extract any of the behavioral features offered by RAPIDS without writing any code.
  8. +
  9. Extensible code. You can easily add your own data streams or behavioral features in R or Python, share them with the community, and keep authorship and citations.
  10. +
  11. Time zone aware. Your data is adjusted to one or more time zones per participant.
  12. +
  13. Flexible time segments. You can extract behavioral features on time windows of any length (e.g., 5 minutes, 3 hours, 2 days), on every day or particular days (e.g., weekends, Mondays, the 1st of each month, etc.), or around events of interest (e.g., surveys or clinical relapses).
  14. +
  15. Tested code. We are continually adding tests to make sure our behavioral features are correct.
  16. +
  17. Reproducible code. If you structure your analysis within RAPIDS, you can be sure your code will run in other computers as intended, thanks to R and Python virtual environments. You can share your analysis code along with your publications without any overhead.
  18. +
  19. Private. All your data is processed locally.
  20. +
+

Users and Contributors

+
Community Contributors

Many thanks to our community contributions and the whole team:

+
    +
  • Agam Kumar (CMU)
  • +
  • Yasaman S. Sefidgar (University of Washington)
  • +
  • Joe Kim (Duke University)
  • +
  • Brinnae Bent (Duke University)
  • +
  • Stephen Price (CMU)
  • +
  • Neil Singh (University of Virginia)
  • +
+

Many thanks to the researchers that made their work open source:

+ +
+
Publications using RAPIDS
    +
  • Predicting Symptoms of Depression and Anxiety Using Smartphone and Wearable Data link
  • +
  • Predicting Depression from Smartphone Behavioral Markers Using Machine Learning Methods, Hyper-parameter Optimization, and Feature Importance Analysis: An Exploratory Study link
  • +
  • Digital Biomarkers of Symptom Burden Self-Reported by Perioperative Patients Undergoing Pancreatic Surgery: Prospective Longitudinal Study link
  • +
  • An Automated Machine Learning Pipeline for Monitoring and Forecasting Mobile Health Data link
  • +
+
+
+
carnegie mellon university
+
digital biomarker development pipeline
+
university of helsinki
+
university of manchester
+
monash university
+
oulu university
+
university of pennsylvania
+
university of pittsburgh
+
university of virginia
+
university of washington
+
+ + + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/javascripts/extra.js b/1.3/javascripts/extra.js new file mode 100644 index 00000000..e69de29b diff --git a/1.3/migrating-from-old-versions/index.html b/1.3/migrating-from-old-versions/index.html new file mode 100644 index 00000000..bc62ddf6 --- /dev/null +++ b/1.3/migrating-from-old-versions/index.html @@ -0,0 +1,2001 @@ + + + + + + + + + + + + + + + + + + + + + + Migrating from an old version - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Migration guides

+

Migrating from RAPIDS 0.4.x or older

+

There are four actions that you need to take if you were using RAPIDS 0.4.3 or older (before Feb 9th, 2021):

+
Check the new Overview page

Check the new Overview page. Hopefully, it is a better overview of RAPIDS and provides answers to Frequently Asked Questions.

+
+
Deploy RAPIDS in a new folder
    +
  • Clone RAPIDS 1.x in a new folder (do not pull the updates in your current folder)
  • +
  • Activate your conda environment
  • +
  • Install renv again snakemake -j1 renv_install (for Ubuntu take advantage of the platform specific R renv instructions)
  • +
  • Restore renv packages snakemake -j1 renv_restore (for Ubuntu take advantage of the platform specific R renv instructions)
  • +
  • Move your participant files pxx.yaml to the new folder
  • +
  • Move your time segment files to the new folder
  • +
  • Move your .env file to the new folder
  • +
+
+
Migrate your .env file to the new credentials.yaml format

The .env file is not used anymore, the same credential groups are stored in credentials.yaml, migrate your .env file by running: +

python tools/update_format_env.py
+

+
+
Reconfigure your config.yaml

Reconfigure your config.yaml file by hand (don’t copy and paste the old one). Some keys and values changed but the defaults should be compatible with the things you know from RAPIDS 0.x (see below).

+
+

The most relevant changes to RAPIDS that you need to know about are:

+
We introduced the concept of data streams

RAPIDS abstracts sensor data logged by different devices, platforms and stored in different data containers as data streams.

+

The default data stream for PHONE is aware_mysql, and the default for FITBIT is fitbitjson_mysql. This is compatible with the old functionality (AWARE and JSON Fitbit data stored in MySQL). These values are set in [PHONE_DATA_STREAMS][USE] and [FITBIT_DATA_STREAMS][USE].

+

You can add new data stream formats (sensing apps) and containers (database engines, file types, etc.).

+

If you were processing your Fitbit data either in JSON or plain text (parsed) format, and it was stored in MySQL or CSV files, the changes that you made to your raw data will be compatible. Just choose fitbitjson_mysql, fitbitparsed_mysql, fitbitjson_csv, fitbitparsed_csv accordingly and set it in [FITBIT_DATA_STREAMS][USE].

+

In the future, you will not have to change your raw data; you will be able to just change column mappings/values in the data stream’s format.yaml file.

+
+
We introduced multiple time zones

You can now process data from participants that visited multiple time zones. The default is still a single time zone (America/New_York). See how to handle multiple time zones

+
+
The keyword multiple is now infer

When processing data from smartphones, RAPIDS allows you to infer the OS of a smartphone by using the keyword multiple in the [PLATFORM] key of participant files. Now RAPIDS uses infer instead of multiple Nonetheless, multiple still works for backward compatibility.

+
+
A global DATABASE_GROUP does not exist anymore

There is no global DATABASE_GROUP anymore. Each data stream that needs credentials to connect to a database has its own DATABASE_GROUP config key. The groups are defined in credentials.yaml instead of the .env.

+
+
[DEVICE_SENSOR][TABLE] is now [DEVICE_SENSOR][CONTAINER]

We renamed the keys [DEVICE_SENSOR][TABLE] to [DEVICE_SENSOR][CONTAINER] to reflect that, with the introduction of data streams, they can point to a database table, file, or any other data container.

+
+
Creating participant files from the AWARE_DEVICE_TABLE is deprecated

In previous versions of RAPIDS, you could create participant files automatically using the aware_device table. We deprecated this option but you can still achieve the same results if you export the output of the following SQL query as a CSV file and follow the instructions to create participant files from CSV files:

+
SELECT device_id, device_id as fitbit_id, CONCAT("p", _id) as empatica_id, CONCAT("p", _id) as pid, if(brand = "iPhone", "ios", "android") as platform, CONCAT("p", _id)  as label, DATE_FORMAT(FROM_UNIXTIME((timestamp/1000)- 86400), "%Y-%m-%d") as start_date, CURRENT_DATE as end_date from aware_device order by _id;
+
+
+
SCR_SCRIPT and SRC_LANGUAGE are replaced by SRC_SCRIPT

The attributes SCR_SCRIPT and SRC_LANGUAGE of every sensor PROVIDER are replaced by SRC_SCRIPT. SRC_SCRIPT is a relative path from the RAPIDS root folder to that provider’s feature script. We did this to simplify and clarify where the features scripts are stored.

+

There are no actions to take unless you created your own feature provider; update it with your feature script path.

+
+

Migrating from RAPIDS beta

+

If you were relying on the old docs and the most recent version of RAPIDS you are working with is from or before Oct 13, 2020 you are using the beta version of RAPIDS.

+

You can start using the RAPIDS 0.1.0 right away, just take into account the following:

+
Deploy RAPIDS in a new folder
    +
  • Install a new copy of RAPIDS (the R and Python virtual environments didn’t change so the cached versions will be reused)
  • +
  • Make sure you don’t skip a new Installation step to give execution permissions to the RAPIDS script: chmod +x rapids
  • +
  • Move your old .env file
  • +
  • Move your participant files
  • +
+
+
Migrate your participant files

You can migrate your old participant files to the new YAML format: +

python tools/update_format_participant_files.py
+

+
+
Follow the new Configuration guide

Follow the new Configuration guide

+
+
Learn more about the new way to run RAPIDS

Get familiar with the new way of Executing RAPIDS

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/overrides/main.html b/1.3/overrides/main.html new file mode 100644 index 00000000..928f4ea5 --- /dev/null +++ b/1.3/overrides/main.html @@ -0,0 +1,13 @@ +{% extends "base.html" %} + +{% block disqus %} + + {% include "partials/integrations/utterances.html" %} +{% endblock %} + +{% block outdated %} + You're not viewing the latest stable version of RAPIDS. + + Click here to go to latest. + +{% endblock %} diff --git a/1.3/overrides/partials/integrations/utterances.html b/1.3/overrides/partials/integrations/utterances.html new file mode 100644 index 00000000..9bac8c43 --- /dev/null +++ b/1.3/overrides/partials/integrations/utterances.html @@ -0,0 +1,38 @@ + +{% if not page.is_homepage %} +

{{ lang.t("meta.comments") }}

+ + + + +{% endif %} diff --git a/1.3/search/search_index.json b/1.3/search/search_index.json new file mode 100644 index 00000000..b05a0a56 --- /dev/null +++ b/1.3/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Welcome to RAPIDS documentation \u00b6 Reproducible Analysis Pipeline for Data Streams (RAPIDS) allows you to process smartphone and wearable data to extract and create behavioral features (a.k.a. digital biomarkers), visualize mobile sensor data, and structure your analysis into reproducible workflows. RAPIDS is open source, documented, multi-platform, modular, tested, and reproducible. At the moment, we support data streams logged by smartphones, Fitbit wearables, and Empatica wearables in collaboration with the DBDP . Where do I start? New to RAPIDS? Check our Overview + FAQ and minimal example Install , configure , and execute RAPIDS to extract and plot behavioral features Bugs should be reported on Github issues Questions, discussions, feature requests, and feedback can be posted on our Github discussions Keep up to date with our Twitter feed or Slack channel Do you want to modify or add new functionality to RAPIDS? Check our contributing guide Are you upgrading from RAPIDS 0.4.x or older? Follow this guide What are the benefits of using RAPIDS? \u00b6 Consistent analysis . Every participant sensor dataset is analyzed in the same way and isolated from each other. Efficient analysis . Every analysis step is executed only once. Whenever your data or configuration changes, only the affected files are updated. Parallel execution . Thanks to Snakemake, your analysis can be executed over multiple cores without changing your code. Code-free features . Extract any of the behavioral features offered by RAPIDS without writing any code. Extensible code . 
You can easily add your own data streams or behavioral features in R or Python, share them with the community, and keep authorship and citations. Time zone aware . Your data is adjusted to one or more time zones per participant. Flexible time segments . You can extract behavioral features on time windows of any length (e.g., 5 minutes, 3 hours, 2 days), on every day or particular days (e.g., weekends, Mondays, the 1 st of each month, etc.), or around events of interest (e.g., surveys or clinical relapses). Tested code . We are continually adding tests to make sure our behavioral features are correct. Reproducible code . If you structure your analysis within RAPIDS, you can be sure your code will run in other computers as intended, thanks to R and Python virtual environments. You can share your analysis code along with your publications without any overhead. Private . All your data is processed locally. Users and Contributors \u00b6 Community Contributors Many thanks to our community contributions and the whole team : Agam Kumar (CMU) Yasaman S. Sefidgar (University of Washington) Joe Kim (Duke University) Brinnae Bent (Duke University) Stephen Price (CMU) Neil Singh (University of Virginia) Many thanks to the researchers that made their work open source: Panda et al. paper Stachl et al. paper Doryab et al. paper Barnett et al. paper Canzian et al. 
paper Publications using RAPIDS Predicting Symptoms of Depression and Anxiety Using Smartphone and Wearable Data link Predicting Depression from Smartphone Behavioral Markers Using Machine Learning Methods, Hyper-parameter Optimization, and Feature Importance Analysis: An Exploratory Study link Digital Biomarkers of Symptom Burden Self-Reported by Perioperative Patients Undergoing Pancreatic Surgery: Prospective Longitudinal Study link An Automated Machine Learning Pipeline for Monitoring and Forecasting Mobile Health Data link","title":"Home"},{"location":"#welcome-to-rapids-documentation","text":"Reproducible Analysis Pipeline for Data Streams (RAPIDS) allows you to process smartphone and wearable data to extract and create behavioral features (a.k.a. digital biomarkers), visualize mobile sensor data, and structure your analysis into reproducible workflows. RAPIDS is open source, documented, multi-platform, modular, tested, and reproducible. At the moment, we support data streams logged by smartphones, Fitbit wearables, and Empatica wearables in collaboration with the DBDP . Where do I start? New to RAPIDS? Check our Overview + FAQ and minimal example Install , configure , and execute RAPIDS to extract and plot behavioral features Bugs should be reported on Github issues Questions, discussions, feature requests, and feedback can be posted on our Github discussions Keep up to date with our Twitter feed or Slack channel Do you want to modify or add new functionality to RAPIDS? Check our contributing guide Are you upgrading from RAPIDS 0.4.x or older? Follow this guide","title":"Welcome to RAPIDS documentation"},{"location":"#what-are-the-benefits-of-using-rapids","text":"Consistent analysis . Every participant sensor dataset is analyzed in the same way and isolated from each other. Efficient analysis . Every analysis step is executed only once. Whenever your data or configuration changes, only the affected files are updated. Parallel execution . 
Thanks to Snakemake, your analysis can be executed over multiple cores without changing your code. Code-free features . Extract any of the behavioral features offered by RAPIDS without writing any code. Extensible code . You can easily add your own data streams or behavioral features in R or Python, share them with the community, and keep authorship and citations. Time zone aware . Your data is adjusted to one or more time zones per participant. Flexible time segments . You can extract behavioral features on time windows of any length (e.g., 5 minutes, 3 hours, 2 days), on every day or particular days (e.g., weekends, Mondays, the 1 st of each month, etc.), or around events of interest (e.g., surveys or clinical relapses). Tested code . We are continually adding tests to make sure our behavioral features are correct. Reproducible code . If you structure your analysis within RAPIDS, you can be sure your code will run in other computers as intended, thanks to R and Python virtual environments. You can share your analysis code along with your publications without any overhead. Private . All your data is processed locally.","title":"What are the benefits of using RAPIDS?"},{"location":"#users-and-contributors","text":"Community Contributors Many thanks to our community contributions and the whole team : Agam Kumar (CMU) Yasaman S. Sefidgar (University of Washington) Joe Kim (Duke University) Brinnae Bent (Duke University) Stephen Price (CMU) Neil Singh (University of Virginia) Many thanks to the researchers that made their work open source: Panda et al. paper Stachl et al. paper Doryab et al. paper Barnett et al. paper Canzian et al. 
paper Publications using RAPIDS Predicting Symptoms of Depression and Anxiety Using Smartphone and Wearable Data link Predicting Depression from Smartphone Behavioral Markers Using Machine Learning Methods, Hyper-parameter Optimization, and Feature Importance Analysis: An Exploratory Study link Digital Biomarkers of Symptom Burden Self-Reported by Perioperative Patients Undergoing Pancreatic Surgery: Prospective Longitudinal Study link An Automated Machine Learning Pipeline for Monitoring and Forecasting Mobile Health Data link","title":"Users and Contributors"},{"location":"change-log/","text":"Change Log \u00b6 v1.3.0 \u00b6 Refactor PHONE_LOCATIONS DORYAB provider. Fix bugs and faster execution up to 30x New PHONE_KEYBOARD features Add a new strategy to infer home location that can handle multiple homes for the same participant Add module to exclude sleep episodes from steps intraday features Fix PID matching when joining data from multiple participants. Now, we can handle PIDS with an arbitrary format. Fix bug that did not correctly parse participants with more than 2 phones or more than 1 wearable Fix crash when no phone data yield is needed to process location data (ALL & GPS location providers) Remove location rows with the same timestamp based on their accuracy Fix PHONE_CONVERSATION bug that produced inaccurate ratio features when time segments were not daily. Other minor bug fixes v1.2.0 \u00b6 Sleep summary and intraday features are more consistent. Add wake and bedtime features for sleep summary data. Fix bugs with sleep PRICE features. 
Update home page Add contributing guide v1.1.1 \u00b6 Fix length of periodic segments on days with DLS Fix crash when scraping data for an app that does not exist Add tests for phone screen data v1.1.0 \u00b6 Add Fitbit calories intraday features v1.0.1 \u00b6 Fix crash in chunk_episodes of utils.py for multi time zone data Fix crash in BT Doryab provider when the number of clusters is 2 Fix Fitbit multi time zone inference from phone data (simplify) Fix missing columns when the input for phone data yield is empty Fix wrong date time labels for event segments for multi time zone data (all labels are computed based on a single tz) Fix periodic segment crash when there are no segments to assign (only affects wday, mday, qday, or yday) Fix crash in Analysis Workflow with new suffix in segments\u2019 labels v1.0.0 \u00b6 Add a new Overview page. You can extend RAPIDS with your own data streams . Data streams are data collected with other sensing apps besides AWARE (like Beiwe, mindLAMP), and stored in other data containers (databases, files) besides MySQL. Support to analyze Empatica wearable data (thanks to Joe Kim and Brinnae Bent from the DBDP ) Support to analyze AWARE data stored in CSV files and InfluxDB databases Support to analyze data collected over multiple time zones Support for sleep intraday features from the core team and also from the community (thanks to Stephen Price) Users can comment on the documentation (powered by utterances). SCR_SCRIPT and SRC_LANGUAGE are replaced by SRC_SCRIPT . Add RAPIDS new logo Move Citation and Minimal Example page to the Setup section Add config.yaml validation schema and documentation. Now it\u2019s more difficult to modify the config.yaml file with invalid values. Add new time at home Doryab location feature Add and home coordinates to the location data file so location providers can build features based on it. 
If you are migrating from RAPIDS 0.4.3 or older, check this guide v0.4.3 \u00b6 Fix bug when any of the rows from any sensor do not belong a time segment v0.4.2 \u00b6 Update battery testing Fix location processing bug when certain columns don\u2019t exist Fix HR intraday bug when minutesonZONE features were 0 Update FAQs Fix HR summary bug when restinghr=0 (ignore those rows) Fix ROG, location entropy and normalized entropy in Doryab location provider Remove sampling frequency dependance in Doryab location provider Update documentation of Doryab location provider Add new FITBIT_DATA_YIELD RAPIDS provider Deprecate Doryab circadian movement feature until it is fixed v0.4.1 \u00b6 Fix bug when no error message was displayed for an empty [PHONE_DATA_YIELD][SENSORS] when resampling location data v0.4.0 \u00b6 Add four new phone sensors that can be used for PHONE_DATA_YIELD Add code so new feature providers can be added for the new four sensors Add new clustering algorithm (OPTICS) for Doryab features Update default EPS parameter for Doryab location clustering Add clearer error message for invalid phone data yield sensors Add ALL_RESAMPLED flag and accuracy limit for location features Add FAQ about null characters in phone tables Reactivate light and wifi tests and update testing docs Fix bug when parsing Fitbit steps data Fix bugs when merging features from empty time segments Fix minor issues in the documentation v0.3.2 \u00b6 Update docker and linux instructions to use RSPM binary repo for for faster installation Update CI to create a release on a tagged push that passes the tests Clarify in DB credential configuration that we only support MySQL Add Windows installation instructions Fix bugs in the create_participants_file script Fix bugs in Fitbit data parsing. Fixed Doryab location features context of clustering. Fixed the wrong shifting while calculating distance in Doryab location features. 
Refactored the haversine function v0.3.1 \u00b6 Update installation docs for RAPIDS\u2019 docker container Fix example analysis use of accelerometer data in a plot Update FAQ Update minimal example documentation Minor doc updates v0.3.0 \u00b6 Update R and Python virtual environments Add GH actions CI support for tests and docker Add release and test badges to README v0.2.6 \u00b6 Fix old versions banner on nested pages v0.2.5 \u00b6 Fix docs deploy typo v0.2.4 \u00b6 Fix broken links in landing page and docs deploy v0.2.3 \u00b6 Fix participant IDS in the example analysis workflow v0.2.2 \u00b6 Fix readme link to docs v0.2.1 \u00b6 FIx link to the most recent version in the old version banner v0.2.0 \u00b6 Add new PHONE_BLUETOOTH DORYAB provider Deprecate PHONE_BLUETOOTH RAPIDS provider Fix bug in filter_data_by_segment for Python when dataset was empty Minor doc updates New FAQ item v0.1.0 \u00b6 New and more consistent docs (this website). The previous docs are marked as beta Consolidate configuration instructions Flexible time segments Simplify Fitbit behavioral feature extraction and documentation Sensor\u2019s configuration and output is more consistent Update visualizations to handle flexible day segments Create a RAPIDS execution script that allows re-computation of the pipeline after configuration changes Add citation guide Update virtual environment guide Update analysis workflow example Add a Code of Conduct Update Team page","title":"Change Log"},{"location":"change-log/#change-log","text":"","title":"Change Log"},{"location":"change-log/#v130","text":"Refactor PHONE_LOCATIONS DORYAB provider. Fix bugs and faster execution up to 30x New PHONE_KEYBOARD features Add a new strategy to infer home location that can handle multiple homes for the same participant Add module to exclude sleep episodes from steps intraday features Fix PID matching when joining data from multiple participants. Now, we can handle PIDS with an arbitrary format. 
Fix bug that did not correctly parse participants with more than 2 phones or more than 1 wearable Fix crash when no phone data yield is needed to process location data (ALL & GPS location providers) Remove location rows with the same timestamp based on their accuracy Fix PHONE_CONVERSATION bug that produced inaccurate ratio features when time segments were not daily. Other minor bug fixes","title":"v1.3.0"},{"location":"change-log/#v120","text":"Sleep summary and intraday features are more consistent. Add wake and bedtime features for sleep summary data. Fix bugs with sleep PRICE features. Update home page Add contributing guide","title":"v1.2.0"},{"location":"change-log/#v111","text":"Fix length of periodic segments on days with DLS Fix crash when scraping data for an app that does not exist Add tests for phone screen data","title":"v1.1.1"},{"location":"change-log/#v110","text":"Add Fitbit calories intraday features","title":"v1.1.0"},{"location":"change-log/#v101","text":"Fix crash in chunk_episodes of utils.py for multi time zone data Fix crash in BT Doryab provider when the number of clusters is 2 Fix Fitbit multi time zone inference from phone data (simplify) Fix missing columns when the input for phone data yield is empty Fix wrong date time labels for event segments for multi time zone data (all labels are computed based on a single tz) Fix periodic segment crash when there are no segments to assign (only affects wday, mday, qday, or yday) Fix crash in Analysis Workflow with new suffix in segments\u2019 labels","title":"v1.0.1"},{"location":"change-log/#v100","text":"Add a new Overview page. You can extend RAPIDS with your own data streams . Data streams are data collected with other sensing apps besides AWARE (like Beiwe, mindLAMP), and stored in other data containers (databases, files) besides MySQL. 
Support to analyze Empatica wearable data (thanks to Joe Kim and Brinnae Bent from the DBDP ) Support to analyze AWARE data stored in CSV files and InfluxDB databases Support to analyze data collected over multiple time zones Support for sleep intraday features from the core team and also from the community (thanks to Stephen Price) Users can comment on the documentation (powered by utterances). SCR_SCRIPT and SRC_LANGUAGE are replaced by SRC_SCRIPT . Add RAPIDS new logo Move Citation and Minimal Example page to the Setup section Add config.yaml validation schema and documentation. Now it\u2019s more difficult to modify the config.yaml file with invalid values. Add new time at home Doryab location feature Add and home coordinates to the location data file so location providers can build features based on it. If you are migrating from RAPIDS 0.4.3 or older, check this guide","title":"v1.0.0"},{"location":"change-log/#v043","text":"Fix bug when any of the rows from any sensor do not belong a time segment","title":"v0.4.3"},{"location":"change-log/#v042","text":"Update battery testing Fix location processing bug when certain columns don\u2019t exist Fix HR intraday bug when minutesonZONE features were 0 Update FAQs Fix HR summary bug when restinghr=0 (ignore those rows) Fix ROG, location entropy and normalized entropy in Doryab location provider Remove sampling frequency dependance in Doryab location provider Update documentation of Doryab location provider Add new FITBIT_DATA_YIELD RAPIDS provider Deprecate Doryab circadian movement feature until it is fixed","title":"v0.4.2"},{"location":"change-log/#v041","text":"Fix bug when no error message was displayed for an empty [PHONE_DATA_YIELD][SENSORS] when resampling location data","title":"v0.4.1"},{"location":"change-log/#v040","text":"Add four new phone sensors that can be used for PHONE_DATA_YIELD Add code so new feature providers can be added for the new four sensors Add new clustering algorithm (OPTICS) for Doryab 
features Update default EPS parameter for Doryab location clustering Add clearer error message for invalid phone data yield sensors Add ALL_RESAMPLED flag and accuracy limit for location features Add FAQ about null characters in phone tables Reactivate light and wifi tests and update testing docs Fix bug when parsing Fitbit steps data Fix bugs when merging features from empty time segments Fix minor issues in the documentation","title":"v0.4.0"},{"location":"change-log/#v032","text":"Update docker and linux instructions to use RSPM binary repo for for faster installation Update CI to create a release on a tagged push that passes the tests Clarify in DB credential configuration that we only support MySQL Add Windows installation instructions Fix bugs in the create_participants_file script Fix bugs in Fitbit data parsing. Fixed Doryab location features context of clustering. Fixed the wrong shifting while calculating distance in Doryab location features. Refactored the haversine function","title":"v0.3.2"},{"location":"change-log/#v031","text":"Update installation docs for RAPIDS\u2019 docker container Fix example analysis use of accelerometer data in a plot Update FAQ Update minimal example documentation Minor doc updates","title":"v0.3.1"},{"location":"change-log/#v030","text":"Update R and Python virtual environments Add GH actions CI support for tests and docker Add release and test badges to README","title":"v0.3.0"},{"location":"change-log/#v026","text":"Fix old versions banner on nested pages","title":"v0.2.6"},{"location":"change-log/#v025","text":"Fix docs deploy typo","title":"v0.2.5"},{"location":"change-log/#v024","text":"Fix broken links in landing page and docs deploy","title":"v0.2.4"},{"location":"change-log/#v023","text":"Fix participant IDS in the example analysis workflow","title":"v0.2.3"},{"location":"change-log/#v022","text":"Fix readme link to docs","title":"v0.2.2"},{"location":"change-log/#v021","text":"FIx link to the most recent version in 
the old version banner","title":"v0.2.1"},{"location":"change-log/#v020","text":"Add new PHONE_BLUETOOTH DORYAB provider Deprecate PHONE_BLUETOOTH RAPIDS provider Fix bug in filter_data_by_segment for Python when dataset was empty Minor doc updates New FAQ item","title":"v0.2.0"},{"location":"change-log/#v010","text":"New and more consistent docs (this website). The previous docs are marked as beta Consolidate configuration instructions Flexible time segments Simplify Fitbit behavioral feature extraction and documentation Sensor\u2019s configuration and output is more consistent Update visualizations to handle flexible day segments Create a RAPIDS execution script that allows re-computation of the pipeline after configuration changes Add citation guide Update virtual environment guide Update analysis workflow example Add a Code of Conduct Update Team page","title":"v0.1.0"},{"location":"citation/","text":"Cite RAPIDS and providers \u00b6 RAPIDS and the community RAPIDS is a community effort and as such we want to continue recognizing the contributions from other researchers. Besides citing RAPIDS, we ask you to cite any of the authors listed below if you used those sensor providers in your analysis, thank you! RAPIDS \u00b6 If you used RAPIDS, please cite this paper . RAPIDS et al. citation Vega J, Li M, Aguillera K, Goel N, Joshi E, Durica KC, Kunta AR, Low CA RAPIDS: Reproducible Analysis Pipeline for Data Streams Collected with Mobile Devices JMIR Preprints. 18/08/2020:23246 DOI: 10.2196/preprints.23246 URL: https://preprints.jmir.org/preprint/23246 DBDP (all Empatica sensors) \u00b6 If you computed features using the provider [DBDP] of any of the Empatica sensors (accelerometer, heart rate, temperature, EDA, BVP, IBI, tags) cite this paper in addition to RAPIDS. Bent et al. citation Bent, B., Wang, K., Grzesiak, E., Jiang, C., Qi, Y., Jiang, Y., Cho, P., Zingler, K., Ogbeide, F.I., Zhao, A., Runge, R., Sim, I., Dunn, J. (2020). 
The Digital Biomarker Discovery Pipeline: An open source software platform for the development of digital biomarkers using mHealth and wearables data. Journal of Clinical and Translational Science, 1-28. doi:10.1017/cts.2020.511 Panda (accelerometer) \u00b6 If you computed accelerometer features using the provider [PHONE_ACCLEROMETER][PANDA] cite this paper in addition to RAPIDS. Panda et al. citation Panda N, Solsky I, Huang EJ, Lipsitz S, Pradarelli JC, Delisle M, Cusack JC, Gadd MA, Lubitz CC, Mullen JT, Qadan M, Smith BL, Specht M, Stephen AE, Tanabe KK, Gawande AA, Onnela JP, Haynes AB. Using Smartphones to Capture Novel Recovery Metrics After Cancer Surgery. JAMA Surg. 2020 Feb 1;155(2):123-129. doi: 10.1001/jamasurg.2019.4702. PMID: 31657854; PMCID: PMC6820047. Stachl (applications foreground) \u00b6 If you computed applications foreground features using the app category (genre) catalogue in [PHONE_APPLICATIONS_FOREGROUND][RAPIDS] cite this paper in addition to RAPIDS. Stachl et al. citation Clemens Stachl, Quay Au, Ramona Schoedel, Samuel D. Gosling, Gabriella M. Harari, Daniel Buschek, Sarah Theres V\u00f6lkel, Tobias Schuwerk, Michelle Oldemeier, Theresa Ullmann, Heinrich Hussmann, Bernd Bischl, Markus B\u00fchner. Proceedings of the National Academy of Sciences Jul 2020, 117 (30) 17680-17687; DOI: 10.1073/pnas.1920484117 Doryab (bluetooth) \u00b6 If you computed bluetooth features using the provider [PHONE_BLUETOOTH][DORYAB] cite this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Barnett (locations) \u00b6 If you computed locations features using the provider [PHONE_LOCATIONS][BARNETT] cite this paper and this paper in addition to RAPIDS. Barnett et al. 
citation Ian Barnett, Jukka-Pekka Onnela, Inferring mobility measures from GPS traces with missing data, Biostatistics, Volume 21, Issue 2, April 2020, Pages e98\u2013e112, https://doi.org/10.1093/biostatistics/kxy059 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845 Doryab (locations) \u00b6 If you computed locations features using the provider [PHONE_LOCATIONS][DORYAB] cite this paper and this paper in addition to RAPIDS. In addition, if you used the SUN_LI_VEGA_STRATEGY strategy, cite this paper as well. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845 Sun et al. citation Sun S, Folarin AA, Ranjan Y, Rashid Z, Conde P, Stewart C, Cummins N, Matcham F, Dalla Costa G, Simblett S, Leocani L, Lamers F, S\u00f8rensen PS, Buron M, Zabalza A, Guerrero P\u00e9rez AI, Penninx BW, Siddi S, Haro JM, Myin-Germeys I, Rintala A, Wykes T, Narayan VA, Comi G, Hotopf M, Dobson RJ, RADAR-CNS Consortium. Using Smartphones and Wearable Devices to Monitor Behavioral Changes During COVID-19. 
J Med Internet Res 2020;22(9):e19992","title":"Citation"},{"location":"citation/#cite-rapids-and-providers","text":"RAPIDS and the community RAPIDS is a community effort and as such we want to continue recognizing the contributions from other researchers. Besides citing RAPIDS, we ask you to cite any of the authors listed below if you used those sensor providers in your analysis, thank you!","title":"Cite RAPIDS and providers"},{"location":"citation/#rapids","text":"If you used RAPIDS, please cite this paper . RAPIDS et al. citation Vega J, Li M, Aguillera K, Goel N, Joshi E, Durica KC, Kunta AR, Low CA RAPIDS: Reproducible Analysis Pipeline for Data Streams Collected with Mobile Devices JMIR Preprints. 18/08/2020:23246 DOI: 10.2196/preprints.23246 URL: https://preprints.jmir.org/preprint/23246","title":"RAPIDS"},{"location":"citation/#dbdp-all-empatica-sensors","text":"If you computed features using the provider [DBDP] of any of the Empatica sensors (accelerometer, heart rate, temperature, EDA, BVP, IBI, tags) cite this paper in addition to RAPIDS. Bent et al. citation Bent, B., Wang, K., Grzesiak, E., Jiang, C., Qi, Y., Jiang, Y., Cho, P., Zingler, K., Ogbeide, F.I., Zhao, A., Runge, R., Sim, I., Dunn, J. (2020). The Digital Biomarker Discovery Pipeline: An open source software platform for the development of digital biomarkers using mHealth and wearables data. Journal of Clinical and Translational Science, 1-28. doi:10.1017/cts.2020.511","title":"DBDP (all Empatica sensors)"},{"location":"citation/#panda-accelerometer","text":"If you computed accelerometer features using the provider [PHONE_ACCLEROMETER][PANDA] cite this paper in addition to RAPIDS. Panda et al. citation Panda N, Solsky I, Huang EJ, Lipsitz S, Pradarelli JC, Delisle M, Cusack JC, Gadd MA, Lubitz CC, Mullen JT, Qadan M, Smith BL, Specht M, Stephen AE, Tanabe KK, Gawande AA, Onnela JP, Haynes AB. Using Smartphones to Capture Novel Recovery Metrics After Cancer Surgery. JAMA Surg. 
2020 Feb 1;155(2):123-129. doi: 10.1001/jamasurg.2019.4702. PMID: 31657854; PMCID: PMC6820047.","title":"Panda (accelerometer)"},{"location":"citation/#stachl-applications-foreground","text":"If you computed applications foreground features using the app category (genre) catalogue in [PHONE_APPLICATIONS_FOREGROUND][RAPIDS] cite this paper in addition to RAPIDS. Stachl et al. citation Clemens Stachl, Quay Au, Ramona Schoedel, Samuel D. Gosling, Gabriella M. Harari, Daniel Buschek, Sarah Theres V\u00f6lkel, Tobias Schuwerk, Michelle Oldemeier, Theresa Ullmann, Heinrich Hussmann, Bernd Bischl, Markus B\u00fchner. Proceedings of the National Academy of Sciences Jul 2020, 117 (30) 17680-17687; DOI: 10.1073/pnas.1920484117","title":"Stachl (applications foreground)"},{"location":"citation/#doryab-bluetooth","text":"If you computed bluetooth features using the provider [PHONE_BLUETOOTH][DORYAB] cite this paper in addition to RAPIDS. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394","title":"Doryab (bluetooth)"},{"location":"citation/#barnett-locations","text":"If you computed locations features using the provider [PHONE_LOCATIONS][BARNETT] cite this paper and this paper in addition to RAPIDS. Barnett et al. citation Ian Barnett, Jukka-Pekka Onnela, Inferring mobility measures from GPS traces with missing data, Biostatistics, Volume 21, Issue 2, April 2020, Pages e98\u2013e112, https://doi.org/10.1093/biostatistics/kxy059 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. 
DOI: https://doi.org/10.1145/2750858.2805845","title":"Barnett (locations)"},{"location":"citation/#doryab-locations","text":"If you computed locations features using the provider [PHONE_LOCATIONS][DORYAB] cite this paper and this paper in addition to RAPIDS. In addition, if you used the SUN_LI_VEGA_STRATEGY strategy, cite this paper as well. Doryab et al. citation Doryab, A., Chikarsel, P., Liu, X., & Dey, A. K. (2019). Extraction of Behavioral Features from Smartphone and Wearable Data. ArXiv:1812.10394 [Cs, Stat]. http://arxiv.org/abs/1812.10394 Canzian et al. citation Luca Canzian and Mirco Musolesi. 2015. Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp \u201815). Association for Computing Machinery, New York, NY, USA, 1293\u20131304. DOI: https://doi.org/10.1145/2750858.2805845 Sun et al. citation Sun S, Folarin AA, Ranjan Y, Rashid Z, Conde P, Stewart C, Cummins N, Matcham F, Dalla Costa G, Simblett S, Leocani L, Lamers F, S\u00f8rensen PS, Buron M, Zabalza A, Guerrero P\u00e9rez AI, Penninx BW, Siddi S, Haro JM, Myin-Germeys I, Rintala A, Wykes T, Narayan VA, Comi G, Hotopf M, Dobson RJ, RADAR-CNS Consortium. Using Smartphones and Wearable Devices to Monitor Behavioral Changes During COVID-19. J Med Internet Res 2020;22(9):e19992","title":"Doryab (locations)"},{"location":"code_of_conduct/","text":"Contributor Covenant Code of Conduct \u00b6 Our Pledge \u00b6 We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. Our Standards \u00b6 Examples of behavior that contributes to a positive environment for our community include: Demonstrating empathy and kindness toward other people Being respectful of differing opinions, viewpoints, and experiences Giving and gracefully accepting constructive feedback Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: The use of sexualized language or imagery, and sexual attention or advances of any kind Trolling, insulting or derogatory comments, and personal or political attacks Public or private harassment Publishing others\u2019 private information, such as a physical or email address, without their explicit permission Other conduct which could reasonably be considered inappropriate in a professional setting Enforcement Responsibilities \u00b6 Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. Scope \u00b6 This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
Enforcement \u00b6 Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at moshi@pitt.edu . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. Enforcement Guidelines \u00b6 Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 1. Correction \u00b6 Community Impact : Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. Consequence : A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 2. Warning \u00b6 Community Impact : A violation through a single incident or series of actions. Consequence : A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 3. Temporary Ban \u00b6 Community Impact : A serious violation of community standards, including sustained inappropriate behavior. Consequence : A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. 4. 
Permanent Ban \u00b6 Community Impact : Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. Consequence : A permanent ban from any sort of public interaction within the community. Attribution \u00b6 This Code of Conduct is adapted from the Contributor Covenant , version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html . Community Impact Guidelines were inspired by Mozilla\u2019s code of conduct enforcement ladder . For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq . Translations are available at https://www.contributor-covenant.org/translations .","title":"Code of Conduct"},{"location":"code_of_conduct/#contributor-covenant-code-of-conduct","text":"","title":"Contributor Covenant Code of Conduct"},{"location":"code_of_conduct/#our-pledge","text":"We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.","title":"Our Pledge"},{"location":"code_of_conduct/#our-standards","text":"Examples of behavior that contributes to a positive environment for our community include: Demonstrating empathy and kindness toward other people Being respectful of differing opinions, viewpoints, and experiences Giving and gracefully accepting constructive feedback Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: The use of sexualized language or imagery, and sexual attention or advances of any kind Trolling, insulting or derogatory comments, and personal or political attacks Public or private harassment Publishing others\u2019 private information, such as a physical or email address, without their explicit permission Other conduct which could reasonably be considered inappropriate in a professional setting","title":"Our Standards"},{"location":"code_of_conduct/#enforcement-responsibilities","text":"Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.","title":"Enforcement Responsibilities"},{"location":"code_of_conduct/#scope","text":"This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. 
Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.","title":"Scope"},{"location":"code_of_conduct/#enforcement","text":"Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at moshi@pitt.edu . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident.","title":"Enforcement"},{"location":"code_of_conduct/#enforcement-guidelines","text":"Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:","title":"Enforcement Guidelines"},{"location":"code_of_conduct/#1-correction","text":"Community Impact : Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. Consequence : A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.","title":"1. Correction"},{"location":"code_of_conduct/#2-warning","text":"Community Impact : A violation through a single incident or series of actions. Consequence : A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.","title":"2. Warning"},{"location":"code_of_conduct/#3-temporary-ban","text":"Community Impact : A serious violation of community standards, including sustained inappropriate behavior. 
Consequence : A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.","title":"3. Temporary Ban"},{"location":"code_of_conduct/#4-permanent-ban","text":"Community Impact : Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. Consequence : A permanent ban from any sort of public interaction within the community.","title":"4. Permanent Ban"},{"location":"code_of_conduct/#attribution","text":"This Code of Conduct is adapted from the Contributor Covenant , version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html . Community Impact Guidelines were inspired by Mozilla\u2019s code of conduct enforcement ladder . For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq . Translations are available at https://www.contributor-covenant.org/translations .","title":"Attribution"},{"location":"common-errors/","text":"Common Errors \u00b6 Cannot connect to your MySQL server \u00b6 Problem **Error in .local ( drv, \\. .. ) :** **Failed to connect to database: Error: Can \\' t initialize character set unknown ( path: compiled \\_ in ) ** : Calls: dbConnect -> dbConnect -> .local -> .Call Execution halted [ Tue Mar 10 19 :40:15 2020 ] Error in rule download_dataset: jobid: 531 output: data/raw/p60/locations_raw.csv RuleException: CalledProcessError in line 20 of /home/ubuntu/rapids/rules/preprocessing.snakefile: Command 'set -euo pipefail; Rscript --vanilla /home/ubuntu/rapids/.snakemake/scripts/tmp_2jnvqs7.download_dataset.R' returned non-zero exit status 1 . 
File \"/home/ubuntu/rapids/rules/preprocessing.snakefile\" , line 20 , in __rule_download_dataset File \"/home/ubuntu/anaconda3/envs/moshi-env/lib/python3.7/concurrent/futures/thread.py\" , line 57 , in run Shutting down, this might take some time. Exiting because a job execution failed. Look above for error message Solution Please make sure the DATABASE_GROUP in config.yaml matches your DB credentials group in .env . Cannot start mysql in linux via brew services start mysql \u00b6 Problem Cannot start mysql in linux via brew services start mysql Solution Use mysql.server start Every time I run force the download_dataset rule all rules are executed \u00b6 Problem When running snakemake -j1 -R pull_phone_data or ./rapids -j1 -R pull_phone_data all the rules and files are re-computed Solution This is expected behavior. The advantage of using snakemake under the hood is that every time a file containing data is modified every rule that depends on that file will be re-executed to update their results. In this case, since download_dataset updates all the raw data, and you are forcing the rule with the flag -R every single rule that depends on those raw files will be executed. Error Table XXX doesn't exist while running the download_phone_data or download_fitbit_data rule. \u00b6 Problem Error in .local ( conn, statement, ... ) : could not run statement: Table 'db_name.table_name' doesn ' t exist Calls: colnames ... .local -> dbSendQuery -> dbSendQuery -> .local -> .Call Execution halted Solution Please make sure the sensors listed in [PHONE_VALID_SENSED_BINS][PHONE_SENSORS] and the [CONTAINER] of each sensor you activated in config.yaml match your database tables or files. 
How do I install RAPIDS on Ubuntu 16.04 \u00b6 Solution Install dependencies (Homebrew - if not installed): sudo apt-get install libmariadb-client-lgpl-dev libxml2-dev libssl-dev Install brew for linux and add the following line to ~/.bashrc : export PATH=$HOME/.linuxbrew/bin:$PATH source ~/.bashrc Install MySQL brew install mysql brew services start mysql Install R, pandoc and rmarkdown: brew install r brew install gcc@6 (needed due to this bug ) HOMEBREW_CC=gcc-6 brew install pandoc Install miniconda using these instructions Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake renv_install snakemake renv_init snakemake renv_restore This step could take several minutes to complete. Please be patient and let it run until completion. mysql.h cannot be found \u00b6 Problem -------------------------- [ ERROR MESSAGE ] ---------------------------- :1:10: fatal error: mysql.h: No such file or directory compilation terminated. ----------------------------------------------------------------------- ERROR: configuration failed for package 'RMySQL' Solution sudo apt install libmariadbclient-dev No package libcurl found \u00b6 Problem libcurl cannot be found Solution Install libcurl sudo apt install libcurl4-openssl-dev Configuration failed because openssl was not found. \u00b6 Problem openssl cannot be found Solution Install openssl sudo apt install libssl-dev Configuration failed because libxml-2.0 was not found \u00b6 Problem libxml-2.0 cannot be found Solution Install libxml-2.0 sudo apt install libxml2-dev SSL connection error when running RAPIDS \u00b6 Problem You are getting the following error message when running RAPIDS: Error: Failed to connect: SSL connection error: error:1425F102:SSL routines:ssl_choose_client_version:unsupported protocol. 
Solution This is a bug in Ubuntu 20.04 when trying to connect to an old MySQL server with MySQL client 8.0. You should get the same error message if you try to connect from the command line. There you can add the option --ssl-mode=DISABLED but we can't do this from the R connector. If you can't update your server, the quickest solution would be to import your database to another server or to a local environment. Alternatively, you could replace mysql-client and libmysqlclient-dev with mariadb-client and libmariadbclient-dev and reinstall renv. More info about this issue here DB_TABLES key not found \u00b6 Problem If you get the following error KeyError in line 43 of preprocessing.smk: 'PHONE_SENSORS' , it means that the indentation of the key [PHONE_SENSORS] is not matching the other child elements of PHONE_VALID_SENSED_BINS Solution You need to add or remove any leading whitespaces as needed on that line. PHONE_VALID_SENSED_BINS : COMPUTE : False # This flag is automatically ignored (set to True) if you are extracting PHONE_VALID_SENSED_DAYS or screen or Barnett's location features BIN_SIZE : &bin_size 5 # (in minutes) PHONE_SENSORS : [] Error while updating your conda environment in Ubuntu \u00b6 Problem You get the following error: CondaMultiError: CondaVerificationError: The package for tk located at /home/ubuntu/miniconda2/pkgs/tk-8.6.9-hed695b0_1003 appears to be corrupted. The path 'include/mysqlStubs.h' specified in the package manifest cannot be found. ClobberError: This transaction has incompatible packages due to a shared path. 
packages: conda-forge/linux-64::llvm-openmp-10.0.0-hc9558a2_0, anaconda/linux-64::intel-openmp-2019.4-243 path: 'lib/libiomp5.so' Solution Reinstall conda Embedded nul in string \u00b6 Problem You get the following error when downloading sensor data: Error in result_fetch ( res@ptr, n = n ) : embedded nul in string: Solution This problem is due to the way RMariaDB handles a mismatch between data types in R and MySQL (see this issue ). Since it seems this problem won\u2019t be handled by RMariaDB , you have two options: Remove the the null character from the conflictive table cell(s). You can adapt the following query on a MySQL server 8.0 or older update YOUR_TABLE set YOUR_COLUMN = regexp_replace ( YOUR_COLUMN , '\\0' , '' ); If it\u2019s not feasible to modify your data you can try swapping RMariaDB with RMySQL . Just have in mind you might have problems connecting to modern MySQL servers running in Linux: Add RMySQL to the renv environment by running the following command in a terminal open on RAPIDS root folder R -e 'renv::install(\"RMySQL\")' Go to src/data/streams/pull_phone_data.R or src/data/streams/pull_fitbit_data.R and replace library(RMariaDB) with library(RMySQL) In the same file(s) replace dbEngine <- dbConnect(MariaDB(), default.file = \"./.env\", group = group) with dbEngine <- dbConnect(MySQL(), default.file = \"./.env\", group = group) There is no package called RMariaDB \u00b6 Problem You get the following error when executing RAPIDS: Error in library ( RMariaDB ) : there is no package called 'RMariaDB' Execution halted Solution In RAPIDS v0.1.0 we replaced RMySQL R package with RMariaDB , this error means your R virtual environment is out of date, to update it run snakemake -j1 renv_restore Unrecognized output timezone \u201cAmerica/New_York\u201d \u00b6 Problem When running RAPIDS with R 4.0.3 on MacOS on M1, lubridate may throw an error associated with the timezone. 
Error in C_force_tz ( time, tz = tzone, roll ) : CCTZ: Unrecognized output timezone: \"America/New_York\" Calls: get_timestamp_filter ... .parse_date_time -> .strptime -> force_tz -> C_force_tz Solution This is because R timezone library is not set. Please add Sys.setenv(\u201cTZDIR\u201d = file.path(R.home(), \u201cshare\u201d, \u201czoneinfo\u201d)) to the file active.R in renv folder to set the timezone library. For further details on how to test if TZDIR is properly set, please refer to https://github.com/tidyverse/lubridate/issues/928#issuecomment-720059233 . Unimplemented MAX_NO_FIELD_TYPES \u00b6 Problem You get the following error when downloading Fitbit data: Error: Unimplemented MAX_NO_FIELD_TYPES Execution halted Solution At the moment RMariaDB cannot handle MySQL columns of JSON type. Change the type of your Fitbit data column to longtext (note that the content will not change and will still be a JSON object just interpreted as a string). Running RAPIDS on Apple Silicon M1 Mac \u00b6 Problem You get the following error when installing pandoc or running rapids: MoSHI/rapids/renv/staging/1/00LOCK-KernSmooth/00new/KernSmooth/libs/KernSmooth.so: mach-0, but wrong architecture Solution As of Feb 2020 in M1 macs, R needs to be installed via brew under Rosetta (x86 arch) due to some incompatibility with selected R libraries. To do this, run your terminal via Rosetta , then proceed with the usual brew installation command. x86 homebrew should be installed in /usr/local/bin/brew , you can check which brew you are using by typing which brew . Then use x86 homebrew to install R and restore RAPIDS packages ( renv_restore ).","title":"Common Errors"},{"location":"common-errors/#common-errors","text":"","title":"Common Errors"},{"location":"common-errors/#cannot-connect-to-your-mysql-server","text":"Problem **Error in .local ( drv, \\. .. 
) :** **Failed to connect to database: Error: Can \\' t initialize character set unknown ( path: compiled \\_ in ) ** : Calls: dbConnect -> dbConnect -> .local -> .Call Execution halted [ Tue Mar 10 19 :40:15 2020 ] Error in rule download_dataset: jobid: 531 output: data/raw/p60/locations_raw.csv RuleException: CalledProcessError in line 20 of /home/ubuntu/rapids/rules/preprocessing.snakefile: Command 'set -euo pipefail; Rscript --vanilla /home/ubuntu/rapids/.snakemake/scripts/tmp_2jnvqs7.download_dataset.R' returned non-zero exit status 1 . File \"/home/ubuntu/rapids/rules/preprocessing.snakefile\" , line 20 , in __rule_download_dataset File \"/home/ubuntu/anaconda3/envs/moshi-env/lib/python3.7/concurrent/futures/thread.py\" , line 57 , in run Shutting down, this might take some time. Exiting because a job execution failed. Look above for error message Solution Please make sure the DATABASE_GROUP in config.yaml matches your DB credentials group in .env .","title":"Cannot connect to your MySQL server"},{"location":"common-errors/#cannot-start-mysql-in-linux-via-brew-services-start-mysql","text":"Problem Cannot start mysql in linux via brew services start mysql Solution Use mysql.server start","title":"Cannot start mysql in linux via brew services start mysql"},{"location":"common-errors/#every-time-i-run-force-the-download_dataset-rule-all-rules-are-executed","text":"Problem When running snakemake -j1 -R pull_phone_data or ./rapids -j1 -R pull_phone_data all the rules and files are re-computed Solution This is expected behavior. The advantage of using snakemake under the hood is that every time a file containing data is modified every rule that depends on that file will be re-executed to update their results. 
In this case, since download_dataset updates all the raw data, and you are forcing the rule with the flag -R every single rule that depends on those raw files will be executed.","title":"Every time I run force the download_dataset rule all rules are executed"},{"location":"common-errors/#error-table-xxx-doesnt-exist-while-running-the-download_phone_data-or-download_fitbit_data-rule","text":"Problem Error in .local ( conn, statement, ... ) : could not run statement: Table 'db_name.table_name' doesn ' t exist Calls: colnames ... .local -> dbSendQuery -> dbSendQuery -> .local -> .Call Execution halted Solution Please make sure the sensors listed in [PHONE_VALID_SENSED_BINS][PHONE_SENSORS] and the [CONTAINER] of each sensor you activated in config.yaml match your database tables or files.","title":"Error Table XXX doesn't exist while running the download_phone_data or download_fitbit_data rule."},{"location":"common-errors/#how-do-i-install-rapids-on-ubuntu-1604","text":"Solution Install dependencies (Homebrew - if not installed): sudo apt-get install libmariadb-client-lgpl-dev libxml2-dev libssl-dev Install brew for linux and add the following line to ~/.bashrc : export PATH=$HOME/.linuxbrew/bin:$PATH source ~/.bashrc Install MySQL brew install mysql brew services start mysql Install R, pandoc and rmarkdown: brew install r brew install gcc@6 (needed due to this bug ) HOMEBREW_CC=gcc-6 brew install pandoc Install miniconda using these instructions Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install R packages and virtual environment: snakemake renv_install snakemake renv_init snakemake renv_restore This step could take several minutes to complete. 
Please be patient and let it run until completion.","title":"How do I install RAPIDS on Ubuntu 16.04"},{"location":"common-errors/#mysqlh-cannot-be-found","text":"Problem -------------------------- [ ERROR MESSAGE ] ---------------------------- :1:10: fatal error: mysql.h: No such file or directory compilation terminated. ----------------------------------------------------------------------- ERROR: configuration failed for package 'RMySQL' Solution sudo apt install libmariadbclient-dev","title":"mysql.h cannot be found"},{"location":"common-errors/#no-package-libcurl-found","text":"Problem libcurl cannot be found Solution Install libcurl sudo apt install libcurl4-openssl-dev","title":"No package libcurl found"},{"location":"common-errors/#configuration-failed-because-openssl-was-not-found","text":"Problem openssl cannot be found Solution Install openssl sudo apt install libssl-dev","title":"Configuration failed because openssl was not found."},{"location":"common-errors/#configuration-failed-because-libxml-20-was-not-found","text":"Problem libxml-2.0 cannot be found Solution Install libxml-2.0 sudo apt install libxml2-dev","title":"Configuration failed because libxml-2.0 was not found"},{"location":"common-errors/#ssl-connection-error-when-running-rapids","text":"Problem You are getting the following error message when running RAPIDS: Error: Failed to connect: SSL connection error: error:1425F102:SSL routines:ssl_choose_client_version:unsupported protocol. Solution This is a bug in Ubuntu 20.04 when trying to connect to an old MySQL server with MySQL client 8.0. You should get the same error message if you try to connect from the command line. There you can add the option --ssl-mode=DISABLED but we can't do this from the R connector. If you can't update your server, the quickest solution would be to import your database to another server or to a local environment. 
Alternatively, you could replace mysql-client and libmysqlclient-dev with mariadb-client and libmariadbclient-dev and reinstall renv. More info about this issue here","title":"SSL connection error when running RAPIDS"},{"location":"common-errors/#db_tables-key-not-found","text":"Problem If you get the following error KeyError in line 43 of preprocessing.smk: 'PHONE_SENSORS' , it means that the indentation of the key [PHONE_SENSORS] is not matching the other child elements of PHONE_VALID_SENSED_BINS Solution You need to add or remove any leading whitespaces as needed on that line. PHONE_VALID_SENSED_BINS : COMPUTE : False # This flag is automatically ignored (set to True) if you are extracting PHONE_VALID_SENSED_DAYS or screen or Barnett's location features BIN_SIZE : &bin_size 5 # (in minutes) PHONE_SENSORS : []","title":"DB_TABLES key not found"},{"location":"common-errors/#error-while-updating-your-conda-environment-in-ubuntu","text":"Problem You get the following error: CondaMultiError: CondaVerificationError: The package for tk located at /home/ubuntu/miniconda2/pkgs/tk-8.6.9-hed695b0_1003 appears to be corrupted. The path 'include/mysqlStubs.h' specified in the package manifest cannot be found. ClobberError: This transaction has incompatible packages due to a shared path. packages: conda-forge/linux-64::llvm-openmp-10.0.0-hc9558a2_0, anaconda/linux-64::intel-openmp-2019.4-243 path: 'lib/libiomp5.so' Solution Reinstall conda","title":"Error while updating your conda environment in Ubuntu"},{"location":"common-errors/#embedded-nul-in-string","text":"Problem You get the following error when downloading sensor data: Error in result_fetch ( res@ptr, n = n ) : embedded nul in string: Solution This problem is due to the way RMariaDB handles a mismatch between data types in R and MySQL (see this issue ). Since it seems this problem won\u2019t be handled by RMariaDB , you have two options: Remove the the null character from the conflictive table cell(s). 
You can adapt the following query on a MySQL server 8.0 or older update YOUR_TABLE set YOUR_COLUMN = regexp_replace ( YOUR_COLUMN , '\\0' , '' ); If it\u2019s not feasible to modify your data you can try swapping RMariaDB with RMySQL . Just have in mind you might have problems connecting to modern MySQL servers running in Linux: Add RMySQL to the renv environment by running the following command in a terminal open on RAPIDS root folder R -e 'renv::install(\"RMySQL\")' Go to src/data/streams/pull_phone_data.R or src/data/streams/pull_fitbit_data.R and replace library(RMariaDB) with library(RMySQL) In the same file(s) replace dbEngine <- dbConnect(MariaDB(), default.file = \"./.env\", group = group) with dbEngine <- dbConnect(MySQL(), default.file = \"./.env\", group = group)","title":"Embedded nul in string"},{"location":"common-errors/#there-is-no-package-called-rmariadb","text":"Problem You get the following error when executing RAPIDS: Error in library ( RMariaDB ) : there is no package called 'RMariaDB' Execution halted Solution In RAPIDS v0.1.0 we replaced RMySQL R package with RMariaDB , this error means your R virtual environment is out of date, to update it run snakemake -j1 renv_restore","title":"There is no package called RMariaDB"},{"location":"common-errors/#unrecognized-output-timezone-americanew_york","text":"Problem When running RAPIDS with R 4.0.3 on MacOS on M1, lubridate may throw an error associated with the timezone. Error in C_force_tz ( time, tz = tzone, roll ) : CCTZ: Unrecognized output timezone: \"America/New_York\" Calls: get_timestamp_filter ... .parse_date_time -> .strptime -> force_tz -> C_force_tz Solution This is because R timezone library is not set. Please add Sys.setenv(\u201cTZDIR\u201d = file.path(R.home(), \u201cshare\u201d, \u201czoneinfo\u201d)) to the file active.R in renv folder to set the timezone library. 
For further details on how to test if TZDIR is properly set, please refer to https://github.com/tidyverse/lubridate/issues/928#issuecomment-720059233 .","title":"Unrecognized output timezone \"America/New_York\""},{"location":"common-errors/#unimplemented-max_no_field_types","text":"Problem You get the following error when downloading Fitbit data: Error: Unimplemented MAX_NO_FIELD_TYPES Execution halted Solution At the moment RMariaDB cannot handle MySQL columns of JSON type. Change the type of your Fitbit data column to longtext (note that the content will not change and will still be a JSON object just interpreted as a string).","title":"Unimplemented MAX_NO_FIELD_TYPES"},{"location":"common-errors/#running-rapids-on-apple-silicon-m1-mac","text":"Problem You get the following error when installing pandoc or running rapids: MoSHI/rapids/renv/staging/1/00LOCK-KernSmooth/00new/KernSmooth/libs/KernSmooth.so: mach-0, but wrong architecture Solution As of Feb 2020 in M1 macs, R needs to be installed via brew under Rosetta (x86 arch) due to some incompatibility with selected R libraries. To do this, run your terminal via Rosetta , then proceed with the usual brew installation command. x86 homebrew should be installed in /usr/local/bin/brew , you can check which brew you are using by typing which brew . Then use x86 homebrew to install R and restore RAPIDS packages ( renv_restore ).","title":"Running RAPIDS on Apple Silicon M1 Mac"},{"location":"contributing/","text":"Contributing \u00b6 Thank you for taking the time to contribute! All changes, small or big, are welcome, and regardless of who you are, we are always happy to work together to make your contribution as strong as possible. We follow the Covenant Code of Conduct , so we ask you to uphold it. Be kind to everyone in the community, and please report unacceptable behavior to moshiresearch@gmail.com . 
Questions, Feature Requests, and Discussions \u00b6 Post any questions, feature requests, or discussions in our GitHub Discussions tab . Bug Reports \u00b6 Report any bugs in our GithHub issue tracker keeping in mind to: Debug and simplify the problem to create a minimal example. For example, reduce the problem to a single participant, sensor, and a few rows of data. Provide a clear and succinct description of the problem (expected behavior vs. actual behavior). Attach your config.yaml , time segments file, and time zones file if appropriate. Attach test data if possible and any screenshots or extra resources that will help us debug the problem. Share the commit you are running: git rev-parse --short HEAD Share your OS version (e.g., Windows 10) Share the device/sensor you are processing (e.g., phone accelerometer) Documentation Contributions \u00b6 If you want to fix a typo or any other minor changes, you can edit the file online by clicking on the pencil icon at the top right of any page and opening a pull request using Github\u2019s website If your changes are more complex, clone RAPIDS\u2019 repository, setup the dev environment for our documentation with this tutorial , and submit any changes on a new feature branch following our git flow . Code Contributions \u00b6 Hints for any code changes To submit any new code, use a new feature branch following our git flow . If you neeed a new Python or R package in RAPIDS\u2019 virtual environments, follow this tutorial If you need to change the config.yaml you will need to update its validation schema with this tutorial New Data Streams \u00b6 New data containers. If you want to process data from a device RAPIDS supports ( see this table ) but it\u2019s stored in a database engine or file type we don\u2019t support yet, implement a new data stream container and format . You can copy and paste the format.yaml of one of the other streams of the device you are targeting. New sensing apps. 
If you want to add support for new smartphone sensing apps like Beiwe, implement a new data stream container and format . New wearable devices. If you want to add support for a new wearable, open a Github discussion , so we can add the necessary initial configuration files and code. New Behavioral Features \u00b6 If you want to add new behavioral features for mobile sensors RAPIDS already supports, follow this tutorial . A sensor is supported if it has a configuration section in config.yaml . If you want to add new behavioral features for mobile sensors RAPIDS does not support yet, open a Github discussion , so we can add the necessary initial configuration files and code. New Tests \u00b6 If you want to add new tests for existent behavioral features, follow this tutorial . New Visualizations \u00b6 Open a Github discussion , so we can add the necessary initial configuration files and code.","title":"Contributing"},{"location":"contributing/#contributing","text":"Thank you for taking the time to contribute! All changes, small or big, are welcome, and regardless of who you are, we are always happy to work together to make your contribution as strong as possible. We follow the Covenant Code of Conduct , so we ask you to uphold it. Be kind to everyone in the community, and please report unacceptable behavior to moshiresearch@gmail.com .","title":"Contributing"},{"location":"contributing/#questions-feature-requests-and-discussions","text":"Post any questions, feature requests, or discussions in our GitHub Discussions tab .","title":"Questions, Feature Requests, and Discussions"},{"location":"contributing/#bug-reports","text":"Report any bugs in our GithHub issue tracker keeping in mind to: Debug and simplify the problem to create a minimal example. For example, reduce the problem to a single participant, sensor, and a few rows of data. Provide a clear and succinct description of the problem (expected behavior vs. actual behavior). 
Attach your config.yaml , time segments file, and time zones file if appropriate. Attach test data if possible and any screenshots or extra resources that will help us debug the problem. Share the commit you are running: git rev-parse --short HEAD Share your OS version (e.g., Windows 10) Share the device/sensor you are processing (e.g., phone accelerometer)","title":"Bug Reports"},{"location":"contributing/#documentation-contributions","text":"If you want to fix a typo or any other minor changes, you can edit the file online by clicking on the pencil icon at the top right of any page and opening a pull request using Github\u2019s website If your changes are more complex, clone RAPIDS\u2019 repository, setup the dev environment for our documentation with this tutorial , and submit any changes on a new feature branch following our git flow .","title":"Documentation Contributions"},{"location":"contributing/#code-contributions","text":"Hints for any code changes To submit any new code, use a new feature branch following our git flow . If you neeed a new Python or R package in RAPIDS\u2019 virtual environments, follow this tutorial If you need to change the config.yaml you will need to update its validation schema with this tutorial","title":"Code Contributions"},{"location":"contributing/#new-data-streams","text":"New data containers. If you want to process data from a device RAPIDS supports ( see this table ) but it\u2019s stored in a database engine or file type we don\u2019t support yet, implement a new data stream container and format . You can copy and paste the format.yaml of one of the other streams of the device you are targeting. New sensing apps. If you want to add support for new smartphone sensing apps like Beiwe, implement a new data stream container and format . New wearable devices. 
If you want to add support for a new wearable, open a Github discussion , so we can add the necessary initial configuration files and code.","title":"New Data Streams"},{"location":"contributing/#new-behavioral-features","text":"If you want to add new behavioral features for mobile sensors RAPIDS already supports, follow this tutorial . A sensor is supported if it has a configuration section in config.yaml . If you want to add new behavioral features for mobile sensors RAPIDS does not support yet, open a Github discussion , so we can add the necessary initial configuration files and code.","title":"New Behavioral Features"},{"location":"contributing/#new-tests","text":"If you want to add new tests for existent behavioral features, follow this tutorial .","title":"New Tests"},{"location":"contributing/#new-visualizations","text":"Open a Github discussion , so we can add the necessary initial configuration files and code.","title":"New Visualizations"},{"location":"migrating-from-old-versions/","text":"Migration guides \u00b6 Migrating from RAPIDS 0.4.x or older \u00b6 There are four actions that you need to take if you were using RAPIDS 0.4.3 or older ( before Feb 9 th , 2021 ): Check the new Overview page Check the new Overview page. Hopefully, it is a better overview of RAPIDS and provides answers to Frequently Asked Questions. 
Deploy RAPIDS in a new folder Clone RAPIDS 1.x in a new folder (do not pull the updates in your current folder) Activate your conda environment Install renv again snakemake -j1 renv_install (for Ubuntu take advantage of the platform specific R renv instructions ) Restore renv packages snakemake -j1 renv_restore (for Ubuntu take advantage of the platform specific R renv instructions ) Move your participant files pxx.yaml to the new folder Move your time segment files to the new folder Move your .env file to the new folder Migrate your .env file to the new credentials.yaml format The .env file is not used anymore, the same credential groups are stored in credentials.yaml , migrate your .env file by running: python tools/update_format_env.py Reconfigure your config.yaml Reconfigure your config.yaml file by hand (don\u2019t copy and paste the old one). Some keys and values changed but the defaults should be compatible with the things you know from RAPIDS 0.x (see below). The most relevant changes to RAPIDS that you need to know about are: We introduced the concept of data streams RAPIDS abstracts sensor data logged by different devices, platforms and stored in different data containers as data streams . The default data stream for PHONE is aware_mysql , and the default for FITBIT is fitbitjson_mysql . This is compatible with the old functionality (AWARE and JSON Fitbit data stored in MySQL). These values are set in [PHONE_DATA_STREAMS][USE] and [FITBIT_DATA_STREAMS][USE] . You can add new data stream formats (sensing apps) and containers (database engines, file types, etc.). If you were processing your Fitbit data either in JSON or plain text (parsed) format, and it was stored in MySQL or CSV files, the changes that you made to your raw data will be compatible. Just choose fitbitjson_mysql , fitbitparsed_mysql , fitbitjson_csv , fitbitparsed_csv accordingly and set it in [FITBIT_DATA_STREAMS][USE] . 
In the future, you will not have to change your raw data; you will be able to just change column mappings/values in the data stream\u2019s format.yaml file. We introduced multiple time zones You can now process data from participants that visited multiple time zones. The default is still a single time zone (America/New_York). See how to handle multiple time zones The keyword multiple is now infer When processing data from smartphones, RAPIDS allows you to infer the OS of a smartphone by using the keyword multiple in the [PLATFORM] key of participant files. Now RAPIDS uses infer instead of multiple Nonetheless, multiple still works for backward compatibility. A global DATABASE_GROUP does not exist anymore There is no global DATABASE_GROUP anymore. Each data stream that needs credentials to connect to a database has its own DATABASE_GROUP config key . The groups are defined in credentials.yaml instead of the .env . [DEVICE_SENSOR][TABLE] is now [DEVICE_SENSOR][CONTAINER] We renamed the keys [DEVICE_SENSOR][TABLE] to [DEVICE_SENSOR][CONTAINER] to reflect that, with the introduction of data streams, they can point to a database table, file, or any other data container. Creating participant files from the AWARE_DEVICE_TABLE is deprecated In previous versions of RAPIDS, you could create participant files automatically using the aware_device table. 
We deprecated this option but you can still achieve the same results if you export the output of the following SQL query as a CSV file and follow the instructions to create participant files from CSV files : SELECT device_id , device_id as fitbit_id , CONCAT ( \"p\" , _id ) as empatica_id , CONCAT ( \"p\" , _id ) as pid , if ( brand = \"iPhone\" , \"ios\" , \"android\" ) as platform , CONCAT ( \"p\" , _id ) as label , DATE_FORMAT ( FROM_UNIXTIME (( timestamp / 1000 ) - 86400 ), \"%Y-%m-%d\" ) as start_date , CURRENT_DATE as end_date from aware_device order by _id ; SCR_SCRIPT and SRC_LANGUAGE are replaced by SRC_SCRIPT The attributes SCR_SCRIPT and SRC_LANGUAGE of every sensor PROVIDER are replaced by SRC_SCRIPT . SRC_SCRIPT is a relative path from the RAPIDS root folder to that provider\u2019s feature script. We did this to simplify and clarify where the features scripts are stored. There are no actions to take unless you created your own feature provider; update it with your feature script path. Migrating from RAPIDS beta \u00b6 If you were relying on the old docs and the most recent version of RAPIDS you are working with is from or before Oct 13, 2020 you are using the beta version of RAPIDS. 
You can start using the RAPIDS 0.1.0 right away, just take into account the following: Deploy RAPIDS in a new folder Install a new copy of RAPIDS (the R and Python virtual environments didn\u2019t change so the cached versions will be reused) Make sure you don\u2019t skip a new Installation step to give execution permissions to the RAPIDS script: chmod +x rapids Move your old .env file Move your participant files Migrate your participant files You can migrate your old participant files to the new YAML format: python tools/update_format_participant_files.py Follow the new Configuration guide Follow the new Configuration guide Learn more about the new way to run RAPIDS Get familiar with the new way of Executing RAPIDS","title":"Migrating from an old version"},{"location":"migrating-from-old-versions/#migration-guides","text":"","title":"Migration guides"},{"location":"migrating-from-old-versions/#migrating-from-rapids-04x-or-older","text":"There are four actions that you need to take if you were using RAPIDS 0.4.3 or older ( before Feb 9 th , 2021 ): Check the new Overview page Check the new Overview page. Hopefully, it is a better overview of RAPIDS and provides answers to Frequently Asked Questions. 
Deploy RAPIDS in a new folder Clone RAPIDS 1.x in a new folder (do not pull the updates in your current folder) Activate your conda environment Install renv again snakemake -j1 renv_install (for Ubuntu take advantage of the platform specific R renv instructions ) Restore renv packages snakemake -j1 renv_restore (for Ubuntu take advantage of the platform specific R renv instructions ) Move your participant files pxx.yaml to the new folder Move your time segment files to the new folder Move your .env file to the new folder Migrate your .env file to the new credentials.yaml format The .env file is not used anymore, the same credential groups are stored in credentials.yaml , migrate your .env file by running: python tools/update_format_env.py Reconfigure your config.yaml Reconfigure your config.yaml file by hand (don\u2019t copy and paste the old one). Some keys and values changed but the defaults should be compatible with the things you know from RAPIDS 0.x (see below). The most relevant changes to RAPIDS that you need to know about are: We introduced the concept of data streams RAPIDS abstracts sensor data logged by different devices, platforms and stored in different data containers as data streams . The default data stream for PHONE is aware_mysql , and the default for FITBIT is fitbitjson_mysql . This is compatible with the old functionality (AWARE and JSON Fitbit data stored in MySQL). These values are set in [PHONE_DATA_STREAMS][USE] and [FITBIT_DATA_STREAMS][USE] . You can add new data stream formats (sensing apps) and containers (database engines, file types, etc.). If you were processing your Fitbit data either in JSON or plain text (parsed) format, and it was stored in MySQL or CSV files, the changes that you made to your raw data will be compatible. Just choose fitbitjson_mysql , fitbitparsed_mysql , fitbitjson_csv , fitbitparsed_csv accordingly and set it in [FITBIT_DATA_STREAMS][USE] . 
In the future, you will not have to change your raw data; you will be able to just change column mappings/values in the data stream\u2019s format.yaml file. We introduced multiple time zones You can now process data from participants that visited multiple time zones. The default is still a single time zone (America/New_York). See how to handle multiple time zones The keyword multiple is now infer When processing data from smartphones, RAPIDS allows you to infer the OS of a smartphone by using the keyword multiple in the [PLATFORM] key of participant files. Now RAPIDS uses infer instead of multiple Nonetheless, multiple still works for backward compatibility. A global DATABASE_GROUP does not exist anymore There is no global DATABASE_GROUP anymore. Each data stream that needs credentials to connect to a database has its own DATABASE_GROUP config key . The groups are defined in credentials.yaml instead of the .env . [DEVICE_SENSOR][TABLE] is now [DEVICE_SENSOR][CONTAINER] We renamed the keys [DEVICE_SENSOR][TABLE] to [DEVICE_SENSOR][CONTAINER] to reflect that, with the introduction of data streams, they can point to a database table, file, or any other data container. Creating participant files from the AWARE_DEVICE_TABLE is deprecated In previous versions of RAPIDS, you could create participant files automatically using the aware_device table. 
We deprecated this option but you can still achieve the same results if you export the output of the following SQL query as a CSV file and follow the instructions to create participant files from CSV files : SELECT device_id , device_id as fitbit_id , CONCAT ( \"p\" , _id ) as empatica_id , CONCAT ( \"p\" , _id ) as pid , if ( brand = \"iPhone\" , \"ios\" , \"android\" ) as platform , CONCAT ( \"p\" , _id ) as label , DATE_FORMAT ( FROM_UNIXTIME (( timestamp / 1000 ) - 86400 ), \"%Y-%m-%d\" ) as start_date , CURRENT_DATE as end_date from aware_device order by _id ; SCR_SCRIPT and SRC_LANGUAGE are replaced by SRC_SCRIPT The attributes SCR_SCRIPT and SRC_LANGUAGE of every sensor PROVIDER are replaced by SRC_SCRIPT . SRC_SCRIPT is a relative path from the RAPIDS root folder to that provider\u2019s feature script. We did this to simplify and clarify where the features scripts are stored. There are no actions to take unless you created your own feature provider; update it with your feature script path.","title":"Migrating from RAPIDS 0.4.x or older"},{"location":"migrating-from-old-versions/#migrating-from-rapids-beta","text":"If you were relying on the old docs and the most recent version of RAPIDS you are working with is from or before Oct 13, 2020 you are using the beta version of RAPIDS. 
You can start using the RAPIDS 0.1.0 right away, just take into account the following: Deploy RAPIDS in a new folder Install a new copy of RAPIDS (the R and Python virtual environments didn\u2019t change so the cached versions will be reused) Make sure you don\u2019t skip a new Installation step to give execution permissions to the RAPIDS script: chmod +x rapids Move your old .env file Move your participant files Migrate your participant files You can migrate your old participant files to the new YAML format: python tools/update_format_participant_files.py Follow the new Configuration guide Follow the new Configuration guide Learn more about the new way to run RAPIDS Get familiar with the new way of Executing RAPIDS","title":"Migrating from RAPIDS beta"},{"location":"team/","text":"RAPIDS Team \u00b6 If you are interested in contributing feel free to submit a pull request or contact us. Core Team \u00b6 Julio Vega (Designer and Lead Developer) \u00b6 About Julio Vega is a postdoctoral associate at the Mobile Sensing + Health Institute. He is interested in personalized methodologies to monitor chronic conditions that affect daily human behavior using mobile and wearable data. vegaju at upmc . edu Personal Website Meng Li \u00b6 About Meng Li received her Master of Science degree in Information Science from the University of Pittsburgh. She is interested in applying machine learning algorithms to the medical field. lim11 at upmc . edu Linkedin Profile Github Profile Abhineeth Reddy Kunta \u00b6 About Abhineeth Reddy Kunta is a Senior Software Engineer with the Mobile Sensing + Health Institute. He is experienced in software development and specializes in building solutions using machine learning. Abhineeth likes exploring ways to leverage technology in advancing medicine and education. Previously he worked as a Computer Programmer at Georgia Department of Public Health. He has a master\u2019s degree in Computer Science from George Mason University. 
Kwesi Aguillera \u00b6 About Kwesi Aguillera is currently in his first year at the University of Pittsburgh pursuing a Master of Sciences in Information Science specializing in Big Data Analytics. He received his Bachelor of Science degree in Computer Science and Management from the University of the West Indies. Kwesi considers himself a full stack developer and looks forward to applying this knowledge to big data analysis. Linkedin Profile Echhit Joshi \u00b6 About Echhit Joshi is a Masters student at the School of Computing and Information at University of Pittsburgh. His areas of interest are Machine/Deep Learning, Data Mining, and Analytics. Linkedin Profile Nicolas Leo \u00b6 About Nicolas is a rising senior studying computer science at the University of Pittsburgh. His academic interests include databases, machine learning, and application development. After completing his undergraduate degree, he plans to attend graduate school for a MS in Computer Science with a focus on Intelligent Systems. Nikunj Goel \u00b6 About Nik is a graduate student at the University of Pittsburgh pursuing Master of Science in Information Science. He earned his Bachelor of Technology degree in Information Technology from India. He is a Data Enthusiasts and passionate about finding the meaning out of raw data. In a long term, his goal is to create a breakthrough in Data Science and Deep Learning. Linkedin Profile Community Contributors \u00b6 Agam Kumar \u00b6 About Agam is a junior at Carnegie Mellon University studying Statistics and Machine Learning and pursuing an additional major in Computer Science. He is a member of the Data Science team in the Health and Human Performance Lab at CMU and has keen interests in software development and data science. His research interests include ML applications in medicine. Linkedin Profile Github Profile Yasaman S. 
Sefidgar \u00b6 About Linkedin Profile Joe Kim \u00b6 About Personal Website Brinnae Bent \u00b6 About Personal Website Stephen Price \u00b6 About Carnegie Mellon University Neil Singh \u00b6 About University of Virginia Advisors \u00b6 Afsaneh Doryab \u00b6 About Personal Website Carissa Low \u00b6 About Profile","title":"Team"},{"location":"team/#rapids-team","text":"If you are interested in contributing feel free to submit a pull request or contact us.","title":"RAPIDS Team"},{"location":"team/#core-team","text":"","title":"Core Team"},{"location":"team/#julio-vega-designer-and-lead-developer","text":"About Julio Vega is a postdoctoral associate at the Mobile Sensing + Health Institute. He is interested in personalized methodologies to monitor chronic conditions that affect daily human behavior using mobile and wearable data. vegaju at upmc . edu Personal Website","title":"Julio Vega (Designer and Lead Developer)"},{"location":"team/#meng-li","text":"About Meng Li received her Master of Science degree in Information Science from the University of Pittsburgh. She is interested in applying machine learning algorithms to the medical field. lim11 at upmc . edu Linkedin Profile Github Profile","title":"Meng Li"},{"location":"team/#abhineeth-reddy-kunta","text":"About Abhineeth Reddy Kunta is a Senior Software Engineer with the Mobile Sensing + Health Institute. He is experienced in software development and specializes in building solutions using machine learning. Abhineeth likes exploring ways to leverage technology in advancing medicine and education. Previously he worked as a Computer Programmer at Georgia Department of Public Health. He has a master\u2019s degree in Computer Science from George Mason University.","title":"Abhineeth Reddy Kunta"},{"location":"team/#kwesi-aguillera","text":"About Kwesi Aguillera is currently in his first year at the University of Pittsburgh pursuing a Master of Sciences in Information Science specializing in Big Data Analytics. 
He received his Bachelor of Science degree in Computer Science and Management from the University of the West Indies. Kwesi considers himself a full stack developer and looks forward to applying this knowledge to big data analysis. Linkedin Profile","title":"Kwesi Aguillera"},{"location":"team/#echhit-joshi","text":"About Echhit Joshi is a Masters student at the School of Computing and Information at University of Pittsburgh. His areas of interest are Machine/Deep Learning, Data Mining, and Analytics. Linkedin Profile","title":"Echhit Joshi"},{"location":"team/#nicolas-leo","text":"About Nicolas is a rising senior studying computer science at the University of Pittsburgh. His academic interests include databases, machine learning, and application development. After completing his undergraduate degree, he plans to attend graduate school for a MS in Computer Science with a focus on Intelligent Systems.","title":"Nicolas Leo"},{"location":"team/#nikunj-goel","text":"About Nik is a graduate student at the University of Pittsburgh pursuing Master of Science in Information Science. He earned his Bachelor of Technology degree in Information Technology from India. He is a Data Enthusiasts and passionate about finding the meaning out of raw data. In a long term, his goal is to create a breakthrough in Data Science and Deep Learning. Linkedin Profile","title":"Nikunj Goel"},{"location":"team/#community-contributors","text":"","title":"Community Contributors"},{"location":"team/#agam-kumar","text":"About Agam is a junior at Carnegie Mellon University studying Statistics and Machine Learning and pursuing an additional major in Computer Science. He is a member of the Data Science team in the Health and Human Performance Lab at CMU and has keen interests in software development and data science. His research interests include ML applications in medicine. 
Linkedin Profile Github Profile","title":"Agam Kumar"},{"location":"team/#yasaman-s-sefidgar","text":"About Linkedin Profile","title":"Yasaman S. Sefidgar"},{"location":"team/#joe-kim","text":"About Personal Website","title":"Joe Kim"},{"location":"team/#brinnae-bent","text":"About Personal Website","title":"Brinnae Bent"},{"location":"team/#stephen-price","text":"About Carnegie Mellon University","title":"Stephen Price"},{"location":"team/#neil-singh","text":"About University of Virginia","title":"Neil Singh"},{"location":"team/#advisors","text":"","title":"Advisors"},{"location":"team/#afsaneh-doryab","text":"About Personal Website","title":"Afsaneh Doryab"},{"location":"team/#carissa-low","text":"About Profile","title":"Carissa Low"},{"location":"datastreams/add-new-data-streams/","text":"Add New Data Streams \u00b6 A data stream is a set of sensor data collected using a specific type of device with a specific format and stored in a specific container . RAPIDS is agnostic to data streams\u2019 formats and container; see the Data Streams Introduction for a list of supported streams. A container is queried with an R or Python script that connects to the database, API or file where your stream\u2019s raw data is stored. A format is described using a format.yaml file that specifies how to map and mutate your stream\u2019s raw data to match the data and format RAPIDS needs. The most common cases when you would want to implement a new data stream are: You collected data with a mobile sensing app RAPIDS does not support yet. For example, Beiwe data stored in MySQL. You will need to define a new format file and a new container script. You collected data with a mobile sensing app RAPIDS supports, but this data is stored in a container that RAPIDS can\u2019t connect to yet. For example, AWARE data stored in PostgreSQL. In this case, you can reuse the format file of the aware_mysql stream, but you will need to implement a new container script. 
Hint Both the container.[R|py] and the format.yaml are stored in ./src/data/streams/[stream_name] where [stream_name] can be aware_mysql for example. Implement a Container \u00b6 The container script of a data stream can be implemented in R (strongly recommended) or python. This script must have two functions if you are implementing a stream for phone data or one function otherwise. The script can contain other auxiliary functions. First of all, add any parameters your script might need in config.yaml under (device)_DATA_STREAMS . These parameters will be available in the stream_parameters argument of the one or two functions you implement. For example, if you are adding support for Beiwe data stored in PostgreSQL and your container needs a set of credentials to connect to a database, your new data stream configuration would be: PHONE_DATA_STREAMS : USE : aware_python # AVAILABLE: aware_mysql : DATABASE_GROUP : MY_GROUP beiwe_postgresql : DATABASE_GROUP : MY_GROUP # users define this group (user, password, host, etc.) in credentials.yaml Then implement one or both of the following functions: pull_data This function returns the data columns for a specific sensor and participant. It has the following parameters: Param Description stream_parameters Any parameters (keys/values) set by the user in any [DEVICE_DATA_STREAMS][stream_name] key of config.yaml . For example, [DATABASE_GROUP] inside [FITBIT_DATA_STREAMS][fitbitjson_mysql] sensor_container The value set by the user in any [DEVICE_SENSOR][CONTAINER] key of config.yaml . It can be a table, file path, or whatever data source you want to support that contains the data from a single sensor for all participants . For example, [PHONE_ACCELEROMETER][CONTAINER] device The device id that you need to get the data for (this is set by the user in the participant files ). For example, in AWARE this device id is a uuid columns A list of the columns that you need to get from sensor_container . 
You specify these columns in your stream\u2019s format.yaml Example This is the pull_data function we implemented for aware_mysql . Note that we can message , warn or stop the user during execution. pull_data <- function ( stream_parameters , device , sensor_container , columns ){ # get_db_engine is an auxiliary function not shown here for brevity bu can be found in src/data/streams/aware_mysql/container.R dbEngine <- get_db_engine ( stream_parameters $ DATABASE_GROUP ) query <- paste0 ( \"SELECT \" , paste ( columns , collapse = \",\" ), \" FROM \" , sensor_container , \" WHERE device_id = '\" , device , \"'\" ) # Letting the user know what we are doing message ( paste0 ( \"Executing the following query to download data: \" , query )) sensor_data <- dbGetQuery ( dbEngine , query ) dbDisconnect ( dbEngine ) if ( nrow ( sensor_data ) == 0 ) warning ( paste ( \"The device '\" , device , \"' did not have data in \" , sensor_container )) return ( sensor_data ) } infer_device_os Warning This function is only necessary for phone data streams. RAPIDS allows users to use the keyword infer (previously multiple ) to automatically infer the mobile Operative System a phone was running. If you have a way to infer the OS of a device id, implement this function. For example, for AWARE data we use the aware_device table. If you don\u2019t have a way to infer the OS, call stop(\"Error Message\") so other users know they can\u2019t use infer or the inference failed, and they have to assign the OS manually in the participant file. This function returns the operative system ( android or ios ) for a specific phone device id. It has the following parameters: Param Description stream_parameters Any parameters (keys/values) set by the user in any [DEVICE_DATA_STREAMS][stream_name] key of config.yaml . For example, [DATABASE_GROUP] inside [FITBIT_DATA_STREAMS][fitbitjson_mysql] device The device id that you need to infer the OS for (this is set by the user in the participant files ). 
For example, in AWARE this device id is a uuid Example This is the infer_device_os function we implemented for aware_mysql . Note that we can message , warn or stop the user during execution. infer_device_os <- function ( stream_parameters , device ){ # get_db_engine is an auxiliary function not shown here for brevity bu can be found in src/data/streams/aware_mysql/container.R group <- stream_parameters $ DATABASE_GROUP dbEngine <- dbConnect ( MariaDB (), default.file = \"./.env\" , group = group ) query <- paste0 ( \"SELECT device_id,brand FROM aware_device WHERE device_id = '\" , device , \"'\" ) message ( paste0 ( \"Executing the following query to infer phone OS: \" , query )) os <- dbGetQuery ( dbEngine , query ) dbDisconnect ( dbEngine ) if ( nrow ( os ) > 0 ) return ( os %>% mutate ( os = ifelse ( brand == \"iPhone\" , \"ios\" , \"android\" )) %>% pull ( os )) else stop ( paste ( \"We cannot infer the OS of the following device id because it does not exist in the aware_device table:\" , device )) return ( os ) } Implement a Format \u00b6 A format file format.yaml describes the mapping between your stream\u2019s raw data and the data that RAPIDS needs. This file has a section per sensor (e.g. PHONE_ACCELEROMETER ), and each section has two attributes (keys): RAPIDS_COLUMN_MAPPINGS are mappings between the columns RAPIDS needs and the columns your raw data already has. The reserved keyword FLAG_TO_MUTATE flags columns that RAPIDS requires but that are not initially present in your container (database, CSV file). These columns have to be created by your mutation scripts. MUTATION . Sometimes your raw data needs to be transformed to match the format RAPIDS can handle (including creating columns marked as FLAG_TO_MUTATE ) COLUMN_MAPPINGS are mappings between the columns a mutation SCRIPT needs and the columns your raw data has. SCRIPTS are a collection of R or Python scripts that transform one or more raw data columns into the format RAPIDS needs. 
Hint [RAPIDS_COLUMN_MAPPINGS] and [MUTATE][COLUMN_MAPPINGS] have a key (left-hand side string) and a value (right-hand side string). The values are the names used to pulled columns from a container (e.g., columns in a database table). All values are renamed to their keys in lower case. The renamed columns are sent to every mutation script within the data argument, and the final output is the input RAPIDS process further. For example, let\u2019s assume we are implementing beiwe_mysql and defining the following format for PHONE_FAKESENSOR : PHONE_FAKESENSOR : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : beiwe_timestamp DEVICE_ID : beiwe_deviceID MAGNITUDE_SQUARED : FLAG_TO_MUTATE MUTATE : COLUMN_MAPPINGS : MAGNITUDE : beiwe_value SCRIPTS : - src/data/streams/mutations/phone/square_magnitude.py RAPIDS will: Download beiwe_timestamp , beiwe_deviceID , and beiwe_value from the container of beiwe_mysql (MySQL DB) Rename these columns to timestamp , device_id , and magnitude , respectively. Execute square_magnitude.py with a data frame as an argument containing the renamed columns. This script will square magnitude and rename it to magnitude_squared Verify the data frame returned by square_magnitude.py has the columns RAPIDS needs timestamp , device_id , and magnitude_squared . Use this data frame as the input to be processed in the pipeline. Note that although RAPIDS_COLUMN_MAPPINGS and [MUTATE][COLUMN_MAPPINGS] keys are in capital letters for readability (e.g. MAGNITUDE_SQUARED ), the names of the final columns you mutate in your scripts should be lower case. Let\u2019s explain in more depth this column mapping with examples. Name mapping \u00b6 The mapping for some sensors is straightforward. For example, accelerometer data most of the time has a timestamp, three axes (x,y,z), and a device id that produced it. AWARE and a different sensing app like Beiwe likely logged accelerometer data in the same way but with different column names. 
In this case, we only need to match Beiwe data columns to RAPIDS columns one-to-one: PHONE_ACCELEROMETER : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : beiwe_timestamp DEVICE_ID : beiwe_deviceID DOUBLE_VALUES_0 : beiwe_x DOUBLE_VALUES_1 : beiwe_y DOUBLE_VALUES_2 : beiwe_z MUTATE : COLUMN_MAPPINGS : SCRIPTS : # it's ok if this is empty Value mapping \u00b6 For some sensors, we need to map column names and values. For example, screen data has ON and OFF events; let\u2019s suppose Beiwe represents an ON event with the number 1, but RAPIDS identifies ON events with the number 2 . In this case, we need to mutate the raw data coming from Beiwe and replace all 1 s with 2 s. We do this by listing one or more R or Python scripts in MUTATION_SCRIPTS that will be executed in order. We usually store all mutation scripts under src/data/streams/mutations/[device]/[platform]/ and they can be reused across data streams. PHONE_SCREEN : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : beiwe_timestamp DEVICE_ID : beiwe_deviceID EVENT : beiwe_event MUTATE : COLUMN_MAPPINGS : SCRIPTS : - src/data/streams/mutations/phone/beiwe/beiwe_screen_map.py Hint A MUTATION_SCRIPT can also be used to clean/preprocess your data before extracting behavioral features. A mutation script has to have a main function that receives two arguments, data and stream_parameters . The stream_parameters argument contains the config.yaml key/values of your data stream (this is the same argument that your container.[py|R] script receives, see Implement a Container ). python Example of a python mutation script import pandas as pd def main ( data , stream_parameters ): # mutate data return ( data ) R Example of a R mutation script source ( \"renv/activate.R\" ) # needed to use RAPIDS renv environment library ( dplyr ) main <- function ( data , stream_parameters ){ # mutate data return ( data ) } Complex mapping \u00b6 Sometimes, your raw data doesn\u2019t even have the same columns RAPIDS expects for a sensor. 
For example, let\u2019s pretend Beiwe stores PHONE_ACCELEROMETER axis data in a single column called acc_col instead of three. You have to create a MUTATION_SCRIPT to split acc_col into three columns x , y , and z . For this, you mark the three axes columns RAPIDS needs in [RAPIDS_COLUMN_MAPPINGS] with the word FLAG_TO_MUTATE , map acc_col in [MUTATION][COLUMN_MAPPINGS] , and list a Python script under [MUTATION][SCRIPTS] with the code to split acc_col . See an example below. RAPIDS expects that every column mapped as FLAG_TO_MUTATE will be generated by your mutation script, so it won\u2019t try to retrieve them from your container (database, CSV file, etc.). In our example, acc_col will be fetched from the stream\u2019s container and renamed to JOINED_AXES because beiwe_split_acc.py will split it into double_values_0 , double_values_1 , and double_values_2 . PHONE_ACCELEROMETER : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : beiwe_timestamp DEVICE_ID : beiwe_deviceID DOUBLE_VALUES_0 : FLAG_TO_MUTATE DOUBLE_VALUES_1 : FLAG_TO_MUTATE DOUBLE_VALUES_2 : FLAG_TO_MUTATE MUTATE : COLUMN_MAPPINGS : JOINED_AXES : acc_col SCRIPTS : - src/data/streams/mutations/phone/beiwe/beiwe_split_acc.py This is a draft of beiwe_split_acc.py MUTATION_SCRIPT : import pandas as pd def main ( data , stream_parameters ): # data has the acc_col # split acc_col into three columns: double_values_0, double_values_1, double_values_2 to match RAPIDS format # remove acc_col since we don't need it anymore return ( data ) OS complex mapping \u00b6 There is a special case for a complex mapping scenario for smartphone data streams. The Android and iOS sensor APIs return data in different formats for certain sensors (like screen, activity recognition, battery, among others). In case you didn\u2019t notice, the examples we have used so far are grouped under an ANDROID key, which means they will be applied to data collected by Android phones. 
Additionally, each sensor has an IOS key for a similar purpose. We use the complex mapping described above to transform iOS data into an Android format (it\u2019s always iOS to Android and any new phone data stream must do the same). For example, this is the format.yaml key for PHONE_ACTVITY_RECOGNITION . Note that the ANDROID mapping is simple (one-to-one) but the IOS mapping is complex with three FLAG_TO_MUTATE columns, two [MUTATE][COLUMN_MAPPINGS] mappings, and one [MUTATION][SCRIPT] . PHONE_ACTIVITY_RECOGNITION : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : timestamp DEVICE_ID : device_id ACTIVITY_TYPE : activity_type ACTIVITY_NAME : activity_name CONFIDENCE : confidence MUTATION : COLUMN_MAPPINGS : SCRIPTS : IOS : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : timestamp DEVICE_ID : device_id ACTIVITY_TYPE : FLAG_TO_MUTATE ACTIVITY_NAME : FLAG_TO_MUTATE CONFIDENCE : FLAG_TO_MUTATE MUTATION : COLUMN_MAPPINGS : ACTIVITIES : activities CONFIDENCE : confidence SCRIPTS : - \"src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R\" Example activity_recogniton_ios_unification.R In this MUTATION_SCRIPT we create ACTIVITY_NAME and ACTIVITY_TYPE based on activities , and map confidence iOS values to Android values. 
source ( \"renv/activate.R\" ) library ( \"dplyr\" , warn.conflicts = F ) library ( stringr ) clean_ios_activity_column <- function ( ios_gar ){ ios_gar <- ios_gar %>% mutate ( activities = str_replace_all ( activities , pattern = '(\"|\\\\[|\\\\])' , replacement = \"\" )) existent_multiple_activities <- ios_gar %>% filter ( str_detect ( activities , \",\" )) %>% group_by ( activities ) %>% summarise ( mutiple_activities = unique ( activities ), .groups = \"drop_last\" ) %>% pull ( mutiple_activities ) known_multiple_activities <- c ( \"stationary,automotive\" ) unkown_multiple_actvities <- setdiff ( existent_multiple_activities , known_multiple_activities ) if ( length ( unkown_multiple_actvities ) > 0 ){ stop ( paste0 ( \"There are unkwown combinations of ios activities, you need to implement the decision of the ones to keep: \" , unkown_multiple_actvities )) } ios_gar <- ios_gar %>% mutate ( activities = str_replace_all ( activities , pattern = \"stationary,automotive\" , replacement = \"automotive\" )) return ( ios_gar ) } unify_ios_activity_recognition <- function ( ios_gar ){ # We only need to unify Google Activity Recognition data for iOS # discard rows where activities column is blank ios_gar <- ios_gar [ - which ( ios_gar $ activities == \"\" ), ] # clean \"activities\" column of ios_gar ios_gar <- clean_ios_activity_column ( ios_gar ) # make it compatible with android version: generate \"activity_name\" and \"activity_type\" columns ios_gar <- ios_gar %>% mutate ( activity_name = case_when ( activities == \"automotive\" ~ \"in_vehicle\" , activities == \"cycling\" ~ \"on_bicycle\" , activities == \"walking\" ~ \"walking\" , activities == \"running\" ~ \"running\" , activities == \"stationary\" ~ \"still\" ), activity_type = case_when ( activities == \"automotive\" ~ 0 , activities == \"cycling\" ~ 1 , activities == \"walking\" ~ 7 , activities == \"running\" ~ 8 , activities == \"stationary\" ~ 3 , activities == \"unknown\" ~ 4 ), confidence = case_when ( 
confidence == 0 ~ 0 , confidence == 1 ~ 50 , confidence == 2 ~ 100 ) ) %>% select ( - activities ) return ( ios_gar ) } main <- function ( data , stream_parameters ){ return ( unify_ios_activity_recognition ( data , stream_parameters )) }","title":"Add New Data Streams"},{"location":"datastreams/add-new-data-streams/#add-new-data-streams","text":"A data stream is a set of sensor data collected using a specific type of device with a specific format and stored in a specific container . RAPIDS is agnostic to data streams\u2019 formats and container; see the Data Streams Introduction for a list of supported streams. A container is queried with an R or Python script that connects to the database, API or file where your stream\u2019s raw data is stored. A format is described using a format.yaml file that specifies how to map and mutate your stream\u2019s raw data to match the data and format RAPIDS needs. The most common cases when you would want to implement a new data stream are: You collected data with a mobile sensing app RAPIDS does not support yet. For example, Beiwe data stored in MySQL. You will need to define a new format file and a new container script. You collected data with a mobile sensing app RAPIDS supports, but this data is stored in a container that RAPIDS can\u2019t connect to yet. For example, AWARE data stored in PostgreSQL. In this case, you can reuse the format file of the aware_mysql stream, but you will need to implement a new container script. Hint Both the container.[R|py] and the format.yaml are stored in ./src/data/streams/[stream_name] where [stream_name] can be aware_mysql for example.","title":"Add New Data Streams"},{"location":"datastreams/add-new-data-streams/#implement-a-container","text":"The container script of a data stream can be implemented in R (strongly recommended) or python. This script must have two functions if you are implementing a stream for phone data or one function otherwise. 
The script can contain other auxiliary functions. First of all, add any parameters your script might need in config.yaml under (device)_DATA_STREAMS . These parameters will be available in the stream_parameters argument of the one or two functions you implement. For example, if you are adding support for Beiwe data stored in PostgreSQL and your container needs a set of credentials to connect to a database, your new data stream configuration would be: PHONE_DATA_STREAMS : USE : aware_python # AVAILABLE: aware_mysql : DATABASE_GROUP : MY_GROUP beiwe_postgresql : DATABASE_GROUP : MY_GROUP # users define this group (user, password, host, etc.) in credentials.yaml Then implement one or both of the following functions: pull_data This function returns the data columns for a specific sensor and participant. It has the following parameters: Param Description stream_parameters Any parameters (keys/values) set by the user in any [DEVICE_DATA_STREAMS][stream_name] key of config.yaml . For example, [DATABASE_GROUP] inside [FITBIT_DATA_STREAMS][fitbitjson_mysql] sensor_container The value set by the user in any [DEVICE_SENSOR][CONTAINER] key of config.yaml . It can be a table, file path, or whatever data source you want to support that contains the data from a single sensor for all participants . For example, [PHONE_ACCELEROMETER][CONTAINER] device The device id that you need to get the data for (this is set by the user in the participant files ). For example, in AWARE this device id is a uuid columns A list of the columns that you need to get from sensor_container . You specify these columns in your stream\u2019s format.yaml Example This is the pull_data function we implemented for aware_mysql . Note that we can message , warn or stop the user during execution. 
pull_data <- function ( stream_parameters , device , sensor_container , columns ){ # get_db_engine is an auxiliary function not shown here for brevity bu can be found in src/data/streams/aware_mysql/container.R dbEngine <- get_db_engine ( stream_parameters $ DATABASE_GROUP ) query <- paste0 ( \"SELECT \" , paste ( columns , collapse = \",\" ), \" FROM \" , sensor_container , \" WHERE device_id = '\" , device , \"'\" ) # Letting the user know what we are doing message ( paste0 ( \"Executing the following query to download data: \" , query )) sensor_data <- dbGetQuery ( dbEngine , query ) dbDisconnect ( dbEngine ) if ( nrow ( sensor_data ) == 0 ) warning ( paste ( \"The device '\" , device , \"' did not have data in \" , sensor_container )) return ( sensor_data ) } infer_device_os Warning This function is only necessary for phone data streams. RAPIDS allows users to use the keyword infer (previously multiple ) to automatically infer the mobile Operative System a phone was running. If you have a way to infer the OS of a device id, implement this function. For example, for AWARE data we use the aware_device table. If you don\u2019t have a way to infer the OS, call stop(\"Error Message\") so other users know they can\u2019t use infer or the inference failed, and they have to assign the OS manually in the participant file. This function returns the operative system ( android or ios ) for a specific phone device id. It has the following parameters: Param Description stream_parameters Any parameters (keys/values) set by the user in any [DEVICE_DATA_STREAMS][stream_name] key of config.yaml . For example, [DATABASE_GROUP] inside [FITBIT_DATA_STREAMS][fitbitjson_mysql] device The device id that you need to infer the OS for (this is set by the user in the participant files ). For example, in AWARE this device id is a uuid Example This is the infer_device_os function we implemented for aware_mysql . Note that we can message , warn or stop the user during execution. 
infer_device_os <- function ( stream_parameters , device ){ # get_db_engine is an auxiliary function not shown here for brevity bu can be found in src/data/streams/aware_mysql/container.R group <- stream_parameters $ DATABASE_GROUP dbEngine <- dbConnect ( MariaDB (), default.file = \"./.env\" , group = group ) query <- paste0 ( \"SELECT device_id,brand FROM aware_device WHERE device_id = '\" , device , \"'\" ) message ( paste0 ( \"Executing the following query to infer phone OS: \" , query )) os <- dbGetQuery ( dbEngine , query ) dbDisconnect ( dbEngine ) if ( nrow ( os ) > 0 ) return ( os %>% mutate ( os = ifelse ( brand == \"iPhone\" , \"ios\" , \"android\" )) %>% pull ( os )) else stop ( paste ( \"We cannot infer the OS of the following device id because it does not exist in the aware_device table:\" , device )) return ( os ) }","title":"Implement a Container"},{"location":"datastreams/add-new-data-streams/#implement-a-format","text":"A format file format.yaml describes the mapping between your stream\u2019s raw data and the data that RAPIDS needs. This file has a section per sensor (e.g. PHONE_ACCELEROMETER ), and each section has two attributes (keys): RAPIDS_COLUMN_MAPPINGS are mappings between the columns RAPIDS needs and the columns your raw data already has. The reserved keyword FLAG_TO_MUTATE flags columns that RAPIDS requires but that are not initially present in your container (database, CSV file). These columns have to be created by your mutation scripts. MUTATION . Sometimes your raw data needs to be transformed to match the format RAPIDS can handle (including creating columns marked as FLAG_TO_MUTATE ) COLUMN_MAPPINGS are mappings between the columns a mutation SCRIPT needs and the columns your raw data has. SCRIPTS are a collection of R or Python scripts that transform one or more raw data columns into the format RAPIDS needs. 
Hint [RAPIDS_COLUMN_MAPPINGS] and [MUTATE][COLUMN_MAPPINGS] have a key (left-hand side string) and a value (right-hand side string). The values are the names used to pulled columns from a container (e.g., columns in a database table). All values are renamed to their keys in lower case. The renamed columns are sent to every mutation script within the data argument, and the final output is the input RAPIDS process further. For example, let\u2019s assume we are implementing beiwe_mysql and defining the following format for PHONE_FAKESENSOR : PHONE_FAKESENSOR : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : beiwe_timestamp DEVICE_ID : beiwe_deviceID MAGNITUDE_SQUARED : FLAG_TO_MUTATE MUTATE : COLUMN_MAPPINGS : MAGNITUDE : beiwe_value SCRIPTS : - src/data/streams/mutations/phone/square_magnitude.py RAPIDS will: Download beiwe_timestamp , beiwe_deviceID , and beiwe_value from the container of beiwe_mysql (MySQL DB) Rename these columns to timestamp , device_id , and magnitude , respectively. Execute square_magnitude.py with a data frame as an argument containing the renamed columns. This script will square magnitude and rename it to magnitude_squared Verify the data frame returned by square_magnitude.py has the columns RAPIDS needs timestamp , device_id , and magnitude_squared . Use this data frame as the input to be processed in the pipeline. Note that although RAPIDS_COLUMN_MAPPINGS and [MUTATE][COLUMN_MAPPINGS] keys are in capital letters for readability (e.g. MAGNITUDE_SQUARED ), the names of the final columns you mutate in your scripts should be lower case. Let\u2019s explain in more depth this column mapping with examples.","title":"Implement a Format"},{"location":"datastreams/add-new-data-streams/#name-mapping","text":"The mapping for some sensors is straightforward. For example, accelerometer data most of the time has a timestamp, three axes (x,y,z), and a device id that produced it. 
AWARE and a different sensing app like Beiwe likely logged accelerometer data in the same way but with different column names. In this case, we only need to match Beiwe data columns to RAPIDS columns one-to-one: PHONE_ACCELEROMETER : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : beiwe_timestamp DEVICE_ID : beiwe_deviceID DOUBLE_VALUES_0 : beiwe_x DOUBLE_VALUES_1 : beiwe_y DOUBLE_VALUES_2 : beiwe_z MUTATE : COLUMN_MAPPINGS : SCRIPTS : # it's ok if this is empty","title":"Name mapping"},{"location":"datastreams/add-new-data-streams/#value-mapping","text":"For some sensors, we need to map column names and values. For example, screen data has ON and OFF events; let\u2019s suppose Beiwe represents an ON event with the number 1, but RAPIDS identifies ON events with the number 2 . In this case, we need to mutate the raw data coming from Beiwe and replace all 1 s with 2 s. We do this by listing one or more R or Python scripts in MUTATION_SCRIPTS that will be executed in order. We usually store all mutation scripts under src/data/streams/mutations/[device]/[platform]/ and they can be reused across data streams. PHONE_SCREEN : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : beiwe_timestamp DEVICE_ID : beiwe_deviceID EVENT : beiwe_event MUTATE : COLUMN_MAPPINGS : SCRIPTS : - src/data/streams/mutations/phone/beiwe/beiwe_screen_map.py Hint A MUTATION_SCRIPT can also be used to clean/preprocess your data before extracting behavioral features. A mutation script has to have a main function that receives two arguments, data and stream_parameters . The stream_parameters argument contains the config.yaml key/values of your data stream (this is the same argument that your container.[py|R] script receives, see Implement a Container ). 
python Example of a python mutation script import pandas as pd def main ( data , stream_parameters ): # mutate data return ( data ) R Example of a R mutation script source ( \"renv/activate.R\" ) # needed to use RAPIDS renv environment library ( dplyr ) main <- function ( data , stream_parameters ){ # mutate data return ( data ) }","title":"Value mapping"},{"location":"datastreams/add-new-data-streams/#complex-mapping","text":"Sometimes, your raw data doesn\u2019t even have the same columns RAPIDS expects for a sensor. For example, let\u2019s pretend Beiwe stores PHONE_ACCELEROMETER axis data in a single column called acc_col instead of three. You have to create a MUTATION_SCRIPT to split acc_col into three columns x , y , and z . For this, you mark the three axes columns RAPIDS needs in [RAPIDS_COLUMN_MAPPINGS] with the word FLAG_TO_MUTATE , map acc_col in [MUTATION][COLUMN_MAPPINGS] , and list a Python script under [MUTATION][SCRIPTS] with the code to split acc_col . See an example below. RAPIDS expects that every column mapped as FLAG_TO_MUTATE will be generated by your mutation script, so it won\u2019t try to retrieve them from your container (database, CSV file, etc.). In our example, acc_col will be fetched from the stream\u2019s container and renamed to JOINED_AXES because beiwe_split_acc.py will split it into double_values_0 , double_values_1 , and double_values_2 . 
PHONE_ACCELEROMETER : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : beiwe_timestamp DEVICE_ID : beiwe_deviceID DOUBLE_VALUES_0 : FLAG_TO_MUTATE DOUBLE_VALUES_1 : FLAG_TO_MUTATE DOUBLE_VALUES_2 : FLAG_TO_MUTATE MUTATE : COLUMN_MAPPINGS : JOINED_AXES : acc_col SCRIPTS : - src/data/streams/mutations/phone/beiwe/beiwe_split_acc.py This is a draft of beiwe_split_acc.py MUTATION_SCRIPT : import pandas as pd def main ( data , stream_parameters ): # data has the acc_col # split acc_col into three columns: double_values_0, double_values_1, double_values_2 to match RAPIDS format # remove acc_col since we don't need it anymore return ( data )","title":"Complex mapping"},{"location":"datastreams/add-new-data-streams/#os-complex-mapping","text":"There is a special case for a complex mapping scenario for smartphone data streams. The Android and iOS sensor APIs return data in different formats for certain sensors (like screen, activity recognition, battery, among others). In case you didn\u2019t notice, the examples we have used so far are grouped under an ANDROID key, which means they will be applied to data collected by Android phones. Additionally, each sensor has an IOS key for a similar purpose. We use the complex mapping described above to transform iOS data into an Android format (it\u2019s always iOS to Android and any new phone data stream must do the same). For example, this is the format.yaml key for PHONE_ACTVITY_RECOGNITION . Note that the ANDROID mapping is simple (one-to-one) but the IOS mapping is complex with three FLAG_TO_MUTATE columns, two [MUTATE][COLUMN_MAPPINGS] mappings, and one [MUTATION][SCRIPT] . 
PHONE_ACTIVITY_RECOGNITION : ANDROID : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : timestamp DEVICE_ID : device_id ACTIVITY_TYPE : activity_type ACTIVITY_NAME : activity_name CONFIDENCE : confidence MUTATION : COLUMN_MAPPINGS : SCRIPTS : IOS : RAPIDS_COLUMN_MAPPINGS : TIMESTAMP : timestamp DEVICE_ID : device_id ACTIVITY_TYPE : FLAG_TO_MUTATE ACTIVITY_NAME : FLAG_TO_MUTATE CONFIDENCE : FLAG_TO_MUTATE MUTATION : COLUMN_MAPPINGS : ACTIVITIES : activities CONFIDENCE : confidence SCRIPTS : - \"src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R\" Example activity_recogniton_ios_unification.R In this MUTATION_SCRIPT we create ACTIVITY_NAME and ACTIVITY_TYPE based on activities , and map confidence iOS values to Android values. source ( \"renv/activate.R\" ) library ( \"dplyr\" , warn.conflicts = F ) library ( stringr ) clean_ios_activity_column <- function ( ios_gar ){ ios_gar <- ios_gar %>% mutate ( activities = str_replace_all ( activities , pattern = '(\"|\\\\[|\\\\])' , replacement = \"\" )) existent_multiple_activities <- ios_gar %>% filter ( str_detect ( activities , \",\" )) %>% group_by ( activities ) %>% summarise ( mutiple_activities = unique ( activities ), .groups = \"drop_last\" ) %>% pull ( mutiple_activities ) known_multiple_activities <- c ( \"stationary,automotive\" ) unkown_multiple_actvities <- setdiff ( existent_multiple_activities , known_multiple_activities ) if ( length ( unkown_multiple_actvities ) > 0 ){ stop ( paste0 ( \"There are unkwown combinations of ios activities, you need to implement the decision of the ones to keep: \" , unkown_multiple_actvities )) } ios_gar <- ios_gar %>% mutate ( activities = str_replace_all ( activities , pattern = \"stationary,automotive\" , replacement = \"automotive\" )) return ( ios_gar ) } unify_ios_activity_recognition <- function ( ios_gar ){ # We only need to unify Google Activity Recognition data for iOS # discard rows where activities column is blank ios_gar <- ios_gar [ - which ( ios_gar 
$ activities == \"\" ), ] # clean \"activities\" column of ios_gar ios_gar <- clean_ios_activity_column ( ios_gar ) # make it compatible with android version: generate \"activity_name\" and \"activity_type\" columns ios_gar <- ios_gar %>% mutate ( activity_name = case_when ( activities == \"automotive\" ~ \"in_vehicle\" , activities == \"cycling\" ~ \"on_bicycle\" , activities == \"walking\" ~ \"walking\" , activities == \"running\" ~ \"running\" , activities == \"stationary\" ~ \"still\" ), activity_type = case_when ( activities == \"automotive\" ~ 0 , activities == \"cycling\" ~ 1 , activities == \"walking\" ~ 7 , activities == \"running\" ~ 8 , activities == \"stationary\" ~ 3 , activities == \"unknown\" ~ 4 ), confidence = case_when ( confidence == 0 ~ 0 , confidence == 1 ~ 50 , confidence == 2 ~ 100 ) ) %>% select ( - activities ) return ( ios_gar ) } main <- function ( data , stream_parameters ){ return ( unify_ios_activity_recognition ( data , stream_parameters )) }","title":"OS complex mapping"},{"location":"datastreams/aware-csv/","text":"aware_csv \u00b6 This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in CSV files. Warning The CSV files have to use , as separator, \\ as escape character (do not escape \" with \"\" ), and wrap any string columns with \" . 
See examples in the CSV files inside rapids_example_csv.zip Example of a valid CSV file \"_id\",\"timestamp\",\"device_id\",\"activities\",\"confidence\",\"stationary\",\"walking\",\"running\",\"automotive\",\"cycling\",\"unknown\",\"label\" 1,1587528000000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"\" 2,1587528060000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 3,1587528120000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 4,1587528180000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 5,1587528240000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 6,1587528300000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 7,1587528360000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" Container \u00b6 A CSV file per sensor, each containing the data for all participants. The script to connect and download data from this container is at: src/data/streams/aware_csv/container.R Format \u00b6 If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn\u2019t need to modify this format (described below). Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs . The yaml file that describes the format of this data stream is at: src/data/streams/aware_csv/format.yaml For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping for learn how this works. Hint The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default. 
PHONE_ACCELEROMETER ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_ACTIVITY_RECOGNITION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME activity_name ACTIVITY_TYPE activity_type CONFIDENCE confidence MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME FLAG_TO_MUTATE ACTIVITY_TYPE FLAG_TO_MUTATE CONFIDENCE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column ACTIVITIES activities CONFIDENCE confidence SCRIPTS src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R Note For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE : if stream\u2019s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0 if stream\u2019s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1 if stream\u2019s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7 if stream\u2019s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8 if stream\u2019s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3 if stream\u2019s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4 For RAPIDS CONFIDENCE column: if stream\u2019s confidence field is 0, set CONFIDENCE = 0 if stream\u2019s confidence field is 1, set CONFIDENCE = 50 if stream\u2019s confidence field is 2, set CONFIDENCE = 100 PHONE_APPLICATIONS_CRASHES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name APPLICATION_VERSION application_version ERROR_SHORT 
error_short ERROR_LONG error_long ERROR_CONDITION error_condition IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_FOREGROUND ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_NOTIFICATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name TEXT text SOUND sound VIBRATE vibrate DEFAULTS defaults FLAGS flags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_BATTERY ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS battery_status BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Client V1 RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS FLAG_TO_MUTATE BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS Script column Stream column BATTERY_STATUS battery_status SCRIPTS src/data/streams/mutations/phone/aware/battery_ios_unification.R Note For RAPIDS BATTERY_STATUS column: if stream\u2019s battery_status field is 3, set BATTERY_STATUS = 5 (full status) if stream\u2019s battery_status field is 1, set BATTERY_STATUS = 3 (discharge) IOS Client V2 Same as ANDROID PHONE_BLUETOOTH ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BT_ADDRESS bt_address BT_NAME bt_name BT_RSSI bt_rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android). 
PHONE_CALLS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE call_type CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE FLAG_TO_MUTATE CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS Script column Stream column CALL_TYPE call_type SCRIPTS src/data/streams/mutations/phone/aware/calls_ios_unification.R Note We transform iOS call logs into Android\u2019s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android\u2019s events: 1=incoming, 2=outgoing, 3=missed. We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below): Search for the disconnected (4) status as it is common to all calls Group all events that preceded every status 4 We convert every 1,2,4 (or 2,1,4) sequence to an incoming call We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing) We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up) Tested with an Android (OnePlus 7T) and an iPhone XR Call type Android (duration) iOS (duration) New Rule Outgoing missed ended by me 2 (0) 3,4 (0,X) 3,4 is converted to 2 with duration 0 Outgoing missed ended by them 2(0) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2* Incoming missed ended by me NA** 1,4 (0,X) 1,4 is converted to 3 with duration 0 Incoming missed ended by them 3(0) 1,4 (0,X) 1,4 is converted to 3 with duration 0 Outgoing answered 2(X excluding dialing time) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2 Incoming answered 1(X excluding dialing time) 1,2,4 (0,X,X2) 1,2,4 is converted to 1 with duration X2 .* There 
is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered). .** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant. PHONE_CONVERSATION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START FLAG_TO_MUTATE DOUBLE_CONVO_END FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end SCRIPTS src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R Note For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END : if stream\u2019s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start . if stream\u2019s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end . PHONE_KEYBOARD ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name BEFORE_TEXT before_text CURRENT_TEXT current_text IS_PASSWORD is_password MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_LIGHT ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LIGHT_LUX double_light_lux ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_LOCATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LATITUDE double_latitude DOUBLE_LONGITUDE double_longitude DOUBLE_BEARING double_bearing DOUBLE_SPEED double_speed DOUBLE_ALTITUDE double_altitude PROVIDER provider ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_LOG ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id LOG_MESSAGE log_message MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_MESSAGES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MESSAGE_TYPE message_type TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_SCREEN ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS screen_status MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column SCREEN_STATUS screen_status SCRIPTS src/data/streams/mutations/phone/aware/screen_ios_unification.R Note For SCREEN_STATUS RAPIDS column: if stream\u2019s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode). 
PHONE_WIFI_CONNECTED ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MAC_ADDRESS mac_address SSID ssid BSSID bssid MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_WIFI_VISIBLE ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SSID ssid BSSID bssid SECURITY security FREQUENCY frequency RSSI rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android).","title":"aware_csv"},{"location":"datastreams/aware-csv/#aware_csv","text":"This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in CSV files. Warning The CSV files have to use , as separator, \\ as escape character (do not escape \" with \"\" ), and wrap any string columns with \" . See examples in the CSV files inside rapids_example_csv.zip Example of a valid CSV file \"_id\",\"timestamp\",\"device_id\",\"activities\",\"confidence\",\"stationary\",\"walking\",\"running\",\"automotive\",\"cycling\",\"unknown\",\"label\" 1,1587528000000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"\" 2,1587528060000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 3,1587528120000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 4,1587528180000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 5,1587528240000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 6,1587528300000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\" 7,1587528360000,\"13dbc8a3-dae3-4834-823a-4bc96a7d459d\",\"[\\\"stationary\\\"]\",2,1,0,0,0,0,0,\"supplement\"","title":"aware_csv"},{"location":"datastreams/aware-csv/#container","text":"A CSV file per sensor, each containing 
the data for all participants. The script to connect and download data from this container is at: src/data/streams/aware_csv/container.R","title":"Container"},{"location":"datastreams/aware-csv/#format","text":"If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn\u2019t need to modify this format (described below). Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs . The yaml file that describes the format of this data stream is at: src/data/streams/aware_csv/format.yaml For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping for learn how this works. Hint The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default. PHONE_ACCELEROMETER ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_ACTIVITY_RECOGNITION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME activity_name ACTIVITY_TYPE activity_type CONFIDENCE confidence MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME FLAG_TO_MUTATE ACTIVITY_TYPE FLAG_TO_MUTATE CONFIDENCE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column ACTIVITIES activities CONFIDENCE confidence SCRIPTS src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R Note For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE : if stream\u2019s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0 if stream\u2019s activities field is cycling, set 
ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1 if stream\u2019s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7 if stream\u2019s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8 if stream\u2019s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3 if stream\u2019s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4 For RAPIDS CONFIDENCE column: if stream\u2019s confidence field is 0, set CONFIDENCE = 0 if stream\u2019s confidence field is 1, set CONFIDENCE = 50 if stream\u2019s confidence field is 2, set CONFIDENCE = 100 PHONE_APPLICATIONS_CRASHES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name APPLICATION_VERSION application_version ERROR_SHORT error_short ERROR_LONG error_long ERROR_CONDITION error_condition IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_FOREGROUND ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_NOTIFICATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name TEXT text SOUND sound VIBRATE vibrate DEFAULTS defaults FLAGS flags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_BATTERY ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS battery_status BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Client V1 RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS FLAG_TO_MUTATE BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS Script column Stream column BATTERY_STATUS battery_status SCRIPTS src/data/streams/mutations/phone/aware/battery_ios_unification.R Note For RAPIDS BATTERY_STATUS column: if stream\u2019s battery_status field is 3, set BATTERY_STATUS = 5 (full status) if stream\u2019s battery_status field is 1, set BATTERY_STATUS = 3 (discharge) IOS Client V2 Same as ANDROID PHONE_BLUETOOTH ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BT_ADDRESS bt_address BT_NAME bt_name BT_RSSI bt_rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android). PHONE_CALLS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE call_type CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE FLAG_TO_MUTATE CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS Script column Stream column CALL_TYPE call_type SCRIPTS src/data/streams/mutations/phone/aware/calls_ios_unification.R Note We transform iOS call logs into Android\u2019s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android\u2019s events: 1=incoming, 2=outgoing, 3=missed. 
We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below): Search for the disconnected (4) status as it is common to all calls Group all events that preceded every status 4 We convert every 1,2,4 (or 2,1,4) sequence to an incoming call We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing) We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up) Tested with an Android (OnePlus 7T) and an iPhone XR Call type Android (duration) iOS (duration) New Rule Outgoing missed ended by me 2 (0) 3,4 (0,X) 3,4 is converted to 2 with duration 0 Outgoing missed ended by them 2(0) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2* Incoming missed ended by me NA** 1,4 (0,X) 1,4 is converted to 3 with duration 0 Incoming missed ended by them 3(0) 1,4 (0,X) 1,4 is converted to 3 with duration 0 Outgoing answered 2(X excluding dialing time) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2 Incoming answered 1(X excluding dialing time) 1,2,4 (0,X,X2) 1,2,4 is converted to 1 with duration X2 .* There is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered). .** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant. 
PHONE_CONVERSATION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START FLAG_TO_MUTATE DOUBLE_CONVO_END FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end SCRIPTS src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R Note For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END : if stream\u2019s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start . if stream\u2019s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end . PHONE_KEYBOARD ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name BEFORE_TEXT before_text CURRENT_TEXT current_text IS_PASSWORD is_password MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_LIGHT ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LIGHT_LUX double_light_lux ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_LOCATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LATITUDE double_latitude DOUBLE_LONGITUDE double_longitude DOUBLE_BEARING double_bearing DOUBLE_SPEED double_speed DOUBLE_ALTITUDE double_altitude PROVIDER provider ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_LOG ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id LOG_MESSAGE log_message MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_MESSAGES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MESSAGE_TYPE message_type TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_SCREEN ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS screen_status MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column SCREEN_STATUS screen_status SCRIPTS src/data/streams/mutations/phone/aware/screen_ios_unification.R Note For SCREEN_STATUS RAPIDS column: if stream\u2019s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode). 
PHONE_WIFI_CONNECTED ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MAC_ADDRESS mac_address SSID ssid BSSID bssid MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_WIFI_VISIBLE ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SSID ssid BSSID bssid SECURITY security FREQUENCY frequency RSSI rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android).","title":"Format"},{"location":"datastreams/aware-influxdb/","text":"aware_influxdb (beta) \u00b6 Warning This data stream is being released in beta while we test it thoroughly. This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in an InfluxDB database. Container \u00b6 An InfluxDB database with a table per sensor, each containing the data for all participants. The script to connect and download data from this container is at: src/data/streams/aware_influxdb/container.R Format \u00b6 If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn\u2019t need to modify this format (described below). Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs . The yaml file that describes the format of this data stream is at: src/data/streams/aware_csv/format.yaml For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping for learn how this works. Hint The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default. 
PHONE_ACCELEROMETER ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_ACTIVITY_RECOGNITION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME activity_name ACTIVITY_TYPE activity_type CONFIDENCE confidence MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME FLAG_TO_MUTATE ACTIVITY_TYPE FLAG_TO_MUTATE CONFIDENCE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column ACTIVITIES activities CONFIDENCE confidence SCRIPTS src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R Note For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE : if stream\u2019s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0 if stream\u2019s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1 if stream\u2019s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7 if stream\u2019s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8 if stream\u2019s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3 if stream\u2019s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4 For RAPIDS CONFIDENCE column: if stream\u2019s confidence field is 0, set CONFIDENCE = 0 if stream\u2019s confidence field is 1, set CONFIDENCE = 50 if stream\u2019s confidence field is 2, set CONFIDENCE = 100 PHONE_APPLICATIONS_CRASHES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name APPLICATION_VERSION application_version ERROR_SHORT 
error_short ERROR_LONG error_long ERROR_CONDITION error_condition IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_FOREGROUND ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_NOTIFICATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name TEXT text SOUND sound VIBRATE vibrate DEFAULTS defaults FLAGS flags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_BATTERY ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS battery_status BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Client V1 RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS FLAG_TO_MUTATE BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS Script column Stream column BATTERY_STATUS battery_status SCRIPTS src/data/streams/mutations/phone/aware/battery_ios_unification.R Note For RAPIDS BATTERY_STATUS column: if stream\u2019s battery_status field is 3, set BATTERY_STATUS = 5 (full status) if stream\u2019s battery_status field is 1, set BATTERY_STATUS = 3 (discharge) IOS Client V2 Same as ANDROID PHONE_BLUETOOTH ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BT_ADDRESS bt_address BT_NAME bt_name BT_RSSI bt_rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android). 
PHONE_CALLS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE call_type CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE FLAG_TO_MUTATE CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS Script column Stream column CALL_TYPE call_type SCRIPTS src/data/streams/mutations/phone/aware/calls_ios_unification.R Note We transform iOS call logs into Android\u2019s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android\u2019s events: 1=incoming, 2=outgoing, 3=missed. We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below): Search for the disconnected (4) status as it is common to all calls Group all events that preceded every status 4 We convert every 1,2,4 (or 2,1,4) sequence to an incoming call We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing) We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up) Tested with an Android (OnePlus 7T) and an iPhone XR Call type Android (duration) iOS (duration) New Rule Outgoing missed ended by me 2 (0) 3,4 (0,X) 3,4 is converted to 2 with duration 0 Outgoing missed ended by them 2(0) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2* Incoming missed ended by me NA** 1,4 (0,X) 1,4 is converted to 3 with duration 0 Incoming missed ended by them 3(0) 1,4 (0,X) 1,4 is converted to 3 with duration 0 Outgoing answered 2(X excluding dialing time) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2 Incoming answered 1(X excluding dialing time) 1,2,4 (0,X,X2) 1,2,4 is converted to 1 with duration X2 .* There 
is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered). .** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant. PHONE_CONVERSATION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START FLAG_TO_MUTATE DOUBLE_CONVO_END FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end SCRIPTS src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R Note For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END : if stream\u2019s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start . if stream\u2019s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end . PHONE_KEYBOARD ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name BEFORE_TEXT before_text CURRENT_TEXT current_text IS_PASSWORD is_password MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_LIGHT ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LIGHT_LUX double_light_lux ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_LOCATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LATITUDE double_latitude DOUBLE_LONGITUDE double_longitude DOUBLE_BEARING double_bearing DOUBLE_SPEED double_speed DOUBLE_ALTITUDE double_altitude PROVIDER provider ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_LOG ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id LOG_MESSAGE log_message MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_MESSAGES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MESSAGE_TYPE message_type TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_SCREEN ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS screen_status MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column SCREEN_STATUS screen_status SCRIPTS src/data/streams/mutations/phone/aware/screen_ios_unification.R Note For SCREEN_STATUS RAPIDS column: if stream\u2019s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode). 
PHONE_WIFI_CONNECTED ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MAC_ADDRESS mac_address SSID ssid BSSID bssid MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_WIFI_VISIBLE ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SSID ssid BSSID bssid SECURITY security FREQUENCY frequency RSSI rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android).","title":"aware_influxdb (beta)"},{"location":"datastreams/aware-influxdb/#aware_influxdb-beta","text":"Warning This data stream is being released in beta while we test it thoroughly. This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in an InfluxDB database.","title":"aware_influxdb (beta)"},{"location":"datastreams/aware-influxdb/#container","text":"An InfluxDB database with a table per sensor, each containing the data for all participants. The script to connect and download data from this container is at: src/data/streams/aware_influxdb/container.R","title":"Container"},{"location":"datastreams/aware-influxdb/#format","text":"If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn\u2019t need to modify this format (described below). Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs . The yaml file that describes the format of this data stream is at: src/data/streams/aware_csv/format.yaml For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping for learn how this works. Hint The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default. 
PHONE_ACCELEROMETER ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_ACTIVITY_RECOGNITION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME activity_name ACTIVITY_TYPE activity_type CONFIDENCE confidence MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME FLAG_TO_MUTATE ACTIVITY_TYPE FLAG_TO_MUTATE CONFIDENCE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column ACTIVITIES activities CONFIDENCE confidence SCRIPTS src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R Note For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE : if stream\u2019s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0 if stream\u2019s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1 if stream\u2019s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7 if stream\u2019s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8 if stream\u2019s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3 if stream\u2019s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4 For RAPIDS CONFIDENCE column: if stream\u2019s confidence field is 0, set CONFIDENCE = 0 if stream\u2019s confidence field is 1, set CONFIDENCE = 50 if stream\u2019s confidence field is 2, set CONFIDENCE = 100 PHONE_APPLICATIONS_CRASHES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name APPLICATION_VERSION application_version ERROR_SHORT 
error_short ERROR_LONG error_long ERROR_CONDITION error_condition IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_FOREGROUND ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_NOTIFICATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name TEXT text SOUND sound VIBRATE vibrate DEFAULTS defaults FLAGS flags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_BATTERY ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS battery_status BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Client V1 RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS FLAG_TO_MUTATE BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS Script column Stream column BATTERY_STATUS battery_status SCRIPTS src/data/streams/mutations/phone/aware/battery_ios_unification.R Note For RAPIDS BATTERY_STATUS column: if stream\u2019s battery_status field is 3, set BATTERY_STATUS = 5 (full status) if stream\u2019s battery_status field is 1, set BATTERY_STATUS = 3 (discharge) IOS Client V2 Same as ANDROID PHONE_BLUETOOTH ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BT_ADDRESS bt_address BT_NAME bt_name BT_RSSI bt_rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android). 
PHONE_CALLS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE call_type CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE FLAG_TO_MUTATE CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS Script column Stream column CALL_TYPE call_type SCRIPTS src/data/streams/mutations/phone/aware/calls_ios_unification.R Note We transform iOS call logs into Android\u2019s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android\u2019s events: 1=incoming, 2=outgoing, 3=missed. We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below): Search for the disconnected (4) status as it is common to all calls Group all events that preceded every status 4 We convert every 1,2,4 (or 2,1,4) sequence to an incoming call We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing) We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up) Tested with an Android (OnePlus 7T) and an iPhone XR Call type Android (duration) iOS (duration) New Rule Outgoing missed ended by me 2 (0) 3,4 (0,X) 3,4 is converted to 2 with duration 0 Outgoing missed ended by them 2(0) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2* Incoming missed ended by me NA** 1,4 (0,X) 1,4 is converted to 3 with duration 0 Incoming missed ended by them 3(0) 1,4 (0,X) 1,4 is converted to 3 with duration 0 Outgoing answered 2(X excluding dialing time) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2 Incoming answered 1(X excluding dialing time) 1,2,4 (0,X,X2) 1,2,4 is converted to 1 with duration X2 .* There 
is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered). .** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant. PHONE_CONVERSATION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START FLAG_TO_MUTATE DOUBLE_CONVO_END FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end SCRIPTS src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R Note For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END : if stream\u2019s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start . if stream\u2019s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end . PHONE_KEYBOARD ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name BEFORE_TEXT before_text CURRENT_TEXT current_text IS_PASSWORD is_password MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_LIGHT ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LIGHT_LUX double_light_lux ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_LOCATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LATITUDE double_latitude DOUBLE_LONGITUDE double_longitude DOUBLE_BEARING double_bearing DOUBLE_SPEED double_speed DOUBLE_ALTITUDE double_altitude PROVIDER provider ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_LOG ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id LOG_MESSAGE log_message MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_MESSAGES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MESSAGE_TYPE message_type TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_SCREEN ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS screen_status MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column SCREEN_STATUS screen_status SCRIPTS src/data/streams/mutations/phone/aware/screen_ios_unification.R Note For SCREEN_STATUS RAPIDS column: if stream\u2019s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode). 
PHONE_WIFI_CONNECTED ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MAC_ADDRESS mac_address SSID ssid BSSID bssid MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_WIFI_VISIBLE ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SSID ssid BSSID bssid SECURITY security FREQUENCY frequency RSSI rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android).","title":"Format"},{"location":"datastreams/aware-mysql/","text":"aware_mysql \u00b6 This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in a MySQL database. Container \u00b6 A MySQL database with a table per sensor, each containing the data for all participants. This is the default database created by the old PHP AWARE server (as opposed to the new JavaScript Micro server). The script to connect and download data from this container is at: src/data/streams/aware_mysql/container.R Format \u00b6 If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn\u2019t need to modify this format (described below). Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs . The yaml file that describes the format of this data stream is at: src/data/streams/aware_csv/format.yaml For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping for learn how this works. Hint The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default. 
PHONE_ACCELEROMETER ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_ACTIVITY_RECOGNITION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME activity_name ACTIVITY_TYPE activity_type CONFIDENCE confidence MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME FLAG_TO_MUTATE ACTIVITY_TYPE FLAG_TO_MUTATE CONFIDENCE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column ACTIVITIES activities CONFIDENCE confidence SCRIPTS src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R Note For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE : if stream\u2019s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0 if stream\u2019s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1 if stream\u2019s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7 if stream\u2019s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8 if stream\u2019s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3 if stream\u2019s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4 For RAPIDS CONFIDENCE column: if stream\u2019s confidence field is 0, set CONFIDENCE = 0 if stream\u2019s confidence field is 1, set CONFIDENCE = 50 if stream\u2019s confidence field is 2, set CONFIDENCE = 100 PHONE_APPLICATIONS_CRASHES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name APPLICATION_VERSION application_version ERROR_SHORT 
error_short ERROR_LONG error_long ERROR_CONDITION error_condition IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_FOREGROUND ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_NOTIFICATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name TEXT text SOUND sound VIBRATE vibrate DEFAULTS defaults FLAGS flags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_BATTERY ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS battery_status BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Client V1 RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS FLAG_TO_MUTATE BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS Script column Stream column BATTERY_STATUS battery_status SCRIPTS src/data/streams/mutations/phone/aware/battery_ios_unification.R Note For RAPIDS BATTERY_STATUS column: if stream\u2019s battery_status field is 3, set BATTERY_STATUS = 5 (full status) if stream\u2019s battery_status field is 1, set BATTERY_STATUS = 3 (discharge) IOS Client V2 Same as ANDROID PHONE_BLUETOOTH ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BT_ADDRESS bt_address BT_NAME bt_name BT_RSSI bt_rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android). 
PHONE_CALLS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE call_type CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE FLAG_TO_MUTATE CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS Script column Stream column CALL_TYPE call_type SCRIPTS src/data/streams/mutations/phone/aware/calls_ios_unification.R Note We transform iOS call logs into Android\u2019s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android\u2019s events: 1=incoming, 2=outgoing, 3=missed. We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below): Search for the disconnected (4) status as it is common to all calls Group all events that preceded every status 4 We convert every 1,2,4 (or 2,1,4) sequence to an incoming call We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing) We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up) Tested with an Android (OnePlus 7T) and an iPhone XR Call type Android (duration) iOS (duration) New Rule Outgoing missed ended by me 2 (0) 3,4 (0,X) 3,4 is converted to 2 with duration 0 Outgoing missed ended by them 2(0) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2* Incoming missed ended by me NA** 1,4 (0,X) 1,4 is converted to 3 with duration 0 Incoming missed ended by them 3(0) 1,4 (0,X) 1,4 is converted to 3 with duration 0 Outgoing answered 2(X excluding dialing time) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2 Incoming answered 1(X excluding dialing time) 1,2,4 (0,X,X2) 1,2,4 is converted to 1 with duration X2 .* There 
is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered). .** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant. PHONE_CONVERSATION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START FLAG_TO_MUTATE DOUBLE_CONVO_END FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end SCRIPTS src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R Note For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END : if stream\u2019s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start . if stream\u2019s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end . PHONE_KEYBOARD ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name BEFORE_TEXT before_text CURRENT_TEXT current_text IS_PASSWORD is_password MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_LIGHT ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LIGHT_LUX double_light_lux ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_LOCATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LATITUDE double_latitude DOUBLE_LONGITUDE double_longitude DOUBLE_BEARING double_bearing DOUBLE_SPEED double_speed DOUBLE_ALTITUDE double_altitude PROVIDER provider ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_LOG ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id LOG_MESSAGE log_message MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_MESSAGES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MESSAGE_TYPE message_type TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_SCREEN ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS screen_status MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column SCREEN_STATUS screen_status SCRIPTS src/data/streams/mutations/phone/aware/screen_ios_unification.R Note For SCREEN_STATUS RAPIDS column: if stream\u2019s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode). 
PHONE_WIFI_CONNECTED ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MAC_ADDRESS mac_address SSID ssid BSSID bssid MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_WIFI_VISIBLE ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SSID ssid BSSID bssid SECURITY security FREQUENCY frequency RSSI rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android).","title":"aware_mysql"},{"location":"datastreams/aware-mysql/#aware_mysql","text":"This data stream handles iOS and Android sensor data collected with the AWARE Framework and stored in a MySQL database.","title":"aware_mysql"},{"location":"datastreams/aware-mysql/#container","text":"A MySQL database with a table per sensor, each containing the data for all participants. This is the default database created by the old PHP AWARE server (as opposed to the new JavaScript Micro server). The script to connect and download data from this container is at: src/data/streams/aware_mysql/container.R","title":"Container"},{"location":"datastreams/aware-mysql/#format","text":"If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn\u2019t need to modify this format (described below). Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs . The yaml file that describes the format of this data stream is at: src/data/streams/aware_csv/format.yaml For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping for learn how this works. Hint The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default. 
PHONE_ACCELEROMETER ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_ACTIVITY_RECOGNITION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME activity_name ACTIVITY_TYPE activity_type CONFIDENCE confidence MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME FLAG_TO_MUTATE ACTIVITY_TYPE FLAG_TO_MUTATE CONFIDENCE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column ACTIVITIES activities CONFIDENCE confidence SCRIPTS src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R Note For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE : if stream\u2019s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0 if stream\u2019s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1 if stream\u2019s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7 if stream\u2019s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8 if stream\u2019s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3 if stream\u2019s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4 For RAPIDS CONFIDENCE column: if stream\u2019s confidence field is 0, set CONFIDENCE = 0 if stream\u2019s confidence field is 1, set CONFIDENCE = 50 if stream\u2019s confidence field is 2, set CONFIDENCE = 100 PHONE_APPLICATIONS_CRASHES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name APPLICATION_VERSION application_version ERROR_SHORT 
error_short ERROR_LONG error_long ERROR_CONDITION error_condition IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_FOREGROUND ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_NOTIFICATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name TEXT text SOUND sound VIBRATE vibrate DEFAULTS defaults FLAGS flags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_BATTERY ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS battery_status BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Client V1 RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS FLAG_TO_MUTATE BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS Script column Stream column BATTERY_STATUS battery_status SCRIPTS src/data/streams/mutations/phone/aware/battery_ios_unification.R Note For RAPIDS BATTERY_STATUS column: if stream\u2019s battery_status field is 3, set BATTERY_STATUS = 5 (full status) if stream\u2019s battery_status field is 1, set BATTERY_STATUS = 3 (discharge) IOS Client V2 Same as ANDROID PHONE_BLUETOOTH ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BT_ADDRESS bt_address BT_NAME bt_name BT_RSSI bt_rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android). 
PHONE_CALLS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE call_type CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE FLAG_TO_MUTATE CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS Script column Stream column CALL_TYPE call_type SCRIPTS src/data/streams/mutations/phone/aware/calls_ios_unification.R Note We transform iOS call logs into Android\u2019s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android\u2019s events: 1=incoming, 2=outgoing, 3=missed. We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below): Search for the disconnected (4) status as it is common to all calls Group all events that preceded every status 4 We convert every 1,2,4 (or 2,1,4) sequence to an incoming call We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing) We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up) Tested with an Android (OnePlus 7T) and an iPhone XR Call type Android (duration) iOS (duration) New Rule Outgoing missed ended by me 2 (0) 3,4 (0,X) 3,4 is converted to 2 with duration 0 Outgoing missed ended by them 2(0) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2* Incoming missed ended by me NA** 1,4 (0,X) 1,4 is converted to 3 with duration 0 Incoming missed ended by them 3(0) 1,4 (0,X) 1,4 is converted to 3 with duration 0 Outgoing answered 2(X excluding dialing time) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2 Incoming answered 1(X excluding dialing time) 1,2,4 (0,X,X2) 1,2,4 is converted to 1 with duration X2 .* There 
is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered). .** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant. PHONE_CONVERSATION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START FLAG_TO_MUTATE DOUBLE_CONVO_END FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end SCRIPTS src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R Note For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END : if stream\u2019s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start . if stream\u2019s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end . PHONE_KEYBOARD ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name BEFORE_TEXT before_text CURRENT_TEXT current_text IS_PASSWORD is_password MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_LIGHT ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LIGHT_LUX double_light_lux ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_LOCATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LATITUDE double_latitude DOUBLE_LONGITUDE double_longitude DOUBLE_BEARING double_bearing DOUBLE_SPEED double_speed DOUBLE_ALTITUDE double_altitude PROVIDER provider ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_LOG ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id LOG_MESSAGE log_message MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_MESSAGES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MESSAGE_TYPE message_type TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_SCREEN ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS screen_status MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column SCREEN_STATUS screen_status SCRIPTS src/data/streams/mutations/phone/aware/screen_ios_unification.R Note For SCREEN_STATUS RAPIDS column: if stream\u2019s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode). 
PHONE_WIFI_CONNECTED ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MAC_ADDRESS mac_address SSID ssid BSSID bssid MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_WIFI_VISIBLE ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SSID ssid BSSID bssid SECURITY security FREQUENCY frequency RSSI rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android).","title":"Format"},{"location":"datastreams/data-streams-introduction/","text":"Data Streams Introduction \u00b6 A data stream is a set of sensor data collected using a specific type of device with a specific format and stored in a specific container . For example, the aware_mysql data stream handles smartphone data ( device ) collected with the AWARE Framework ( format ) stored in a MySQL database ( container ). Similarly, smartphone data collected with Beiwe will have a different format and could be stored in a container like a PostgreSQL database or a CSV file. If you want to process a data stream using RAPIDS, make sure that your data is stored in a supported format and container (see table below). If RAPIDS doesn\u2019t support your data stream yet (e.g. Beiwe data stored in PostgreSQL, or AWARE data stored in SQLite), you can always implement a new data stream . If it\u2019s something you think other people might be interested on, we will be happy to include your new data stream in RAPIDS, so get in touch!. Hint Currently, you can add new data streams for smartphones, Fitbit, and Empatica devices. If you need RAPIDS to process data from other devices , like Oura Rings or Actigraph wearables, get in touch. It is a more complicated process that could take a couple of days to implement for someone familiar with R or Python, but we would be happy to work on it together. 
For reference, these are the data streams we currently support: Data Stream Device Format Container Docs aware_mysql Phone AWARE app MySQL link aware_csv Phone AWARE app CSV files link aware_influxdb (beta) Phone AWARE app InfluxDB link fitbitjson_mysql Fitbit JSON (per Fitbit\u2019s API ) MySQL link fitbitjson_csv Fitbit JSON (per Fitbit\u2019s API ) CSV files link fitbitparsed_mysql Fitbit Parsed (parsed API data) MySQL link fitbitparsed_csv Fitbit Parsed (parsed API data) CSV files link empatica_zip Empatica E4 Connect ZIP files link","title":"Introduction"},{"location":"datastreams/data-streams-introduction/#data-streams-introduction","text":"A data stream is a set of sensor data collected using a specific type of device with a specific format and stored in a specific container . For example, the aware_mysql data stream handles smartphone data ( device ) collected with the AWARE Framework ( format ) stored in a MySQL database ( container ). Similarly, smartphone data collected with Beiwe will have a different format and could be stored in a container like a PostgreSQL database or a CSV file. If you want to process a data stream using RAPIDS, make sure that your data is stored in a supported format and container (see table below). If RAPIDS doesn\u2019t support your data stream yet (e.g. Beiwe data stored in PostgreSQL, or AWARE data stored in SQLite), you can always implement a new data stream . If it\u2019s something you think other people might be interested on, we will be happy to include your new data stream in RAPIDS, so get in touch!. Hint Currently, you can add new data streams for smartphones, Fitbit, and Empatica devices. If you need RAPIDS to process data from other devices , like Oura Rings or Actigraph wearables, get in touch. It is a more complicated process that could take a couple of days to implement for someone familiar with R or Python, but we would be happy to work on it together. 
For reference, these are the data streams we currently support: Data Stream Device Format Container Docs aware_mysql Phone AWARE app MySQL link aware_csv Phone AWARE app CSV files link aware_influxdb (beta) Phone AWARE app InfluxDB link fitbitjson_mysql Fitbit JSON (per Fitbit\u2019s API ) MySQL link fitbitjson_csv Fitbit JSON (per Fitbit\u2019s API ) CSV files link fitbitparsed_mysql Fitbit Parsed (parsed API data) MySQL link fitbitparsed_csv Fitbit Parsed (parsed API data) CSV files link empatica_zip Empatica E4 Connect ZIP files link","title":"Data Streams Introduction"},{"location":"datastreams/empatica-zip/","text":"empatica_zip \u00b6 This data stream handles Empatica sensor data downloaded as zip files using the E4 Connect . Container \u00b6 You need to create a subfolder for every participant named after their device id inside the folder specified by [EMPATICA_DATA_STREAMS][empatica_zipfiles][FOLDER] . You can add one or more Empatica zip files to any subfolder. The script to connect and download data from this container is at: src/data/streams/empatica_zip/container.R Format \u00b6 The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Empatica sensors . This file is at: src/data/streams/empatica_zip/format.yaml All columns are mutated from the raw data in the zip files so you don\u2019t need to modify any column mappings. 
EMPATICA_ACCELEROMETER RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_HEARTRATE RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id HEARTRATE heartrate MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_TEMPERATURE RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id TEMPERATURE temperature MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_ELECTRODERMAL_ACTIVITY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ELECTRODERMAL_ACTIVITY electrodermal_activity MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_BLOOD_VOLUME_PULSE RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BLOOD_VOLUME_PULSE blood_volume_pulse MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_INTER_BEAT_INTERVAL RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id INTER_BEAT_INTERVAL inter_beat_interval MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_EMPATICA_TAGS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id TAGS tags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None)","title":"empatica_zip"},{"location":"datastreams/empatica-zip/#empatica_zip","text":"This data stream handles Empatica sensor data downloaded as zip files using the E4 Connect .","title":"empatica_zip"},{"location":"datastreams/empatica-zip/#container","text":"You need to create a subfolder for every participant named after their device id inside the folder specified by [EMPATICA_DATA_STREAMS][empatica_zipfiles][FOLDER] . You can add one or more Empatica zip files to any subfolder. 
The script to connect and download data from this container is at: src/data/streams/empatica_zip/container.R","title":"Container"},{"location":"datastreams/empatica-zip/#format","text":"The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Empatica sensors . This file is at: src/data/streams/empatica_zip/format.yaml All columns are mutated from the raw data in the zip files so you don\u2019t need to modify any column mappings. EMPATICA_ACCELEROMETER RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_HEARTRATE RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id HEARTRATE heartrate MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_TEMPERATURE RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id TEMPERATURE temperature MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_ELECTRODERMAL_ACTIVITY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ELECTRODERMAL_ACTIVITY electrodermal_activity MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_BLOOD_VOLUME_PULSE RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BLOOD_VOLUME_PULSE blood_volume_pulse MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_INTER_BEAT_INTERVAL RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id INTER_BEAT_INTERVAL inter_beat_interval MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) EMPATICA_EMPATICA_TAGS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id TAGS tags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None)","title":"Format"},{"location":"datastreams/fitbitjson-csv/","text":"fitbitjson_csv \u00b6 This 
data stream handles Fitbit sensor data downloaded using the Fitbit Web API and stored in a CSV file. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. Once you have your sensor data in a CSV file, RAPIDS can process it. Warning The CSV files have to use , as separator, \\ as escape character (do not escape \" with \"\" ), and wrap any string columns with \" . Example of a valid CSV file \"timestamp\",\"device_id\",\"label\",\"fitbit_id\",\"fitbit_data_type\",\"fitbit_data\" 1587614400000,\"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",\"5S\",\"5ZKN9B\",\"steps\",\"{\\\"activities-steps\\\":[{\\\"dateTime\\\":\\\"2020-04-23\\\",\\\"value\\\":\\\"7881\\\"}]\" Container \u00b6 The container should be a CSV file per Fitbit sensor, each containing all participants\u2019 data. The script to connect and download data from this container is at: src/data/streams/fitbitjson_csv/container.R Format \u00b6 The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitjson_csv/format.yaml If you want RAPIDS to process Fitbit sensor data using this stream, you will need to map DEVICE_ID and JSON_FITBIT_COLUMN to your own raw data columns inside each sensor section in format.yaml . FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESOUTOFRANGE FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESFATBURN FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESCARDIO FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESPEAK FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_heartrate_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . 
JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE FLAG_TO_MUTATE HEARTRATE_ZONE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - 
src/data/streams/mutations/fitbit/parse_heartrate_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME FLAG_TO_MUTATE LOCAL_END_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id EFFICIENCY FLAG_TO_MUTATE MINUTES_AFTER_WAKEUP 
FLAG_TO_MUTATE MINUTES_ASLEEP FLAG_TO_MUTATE MINUTES_AWAKE FLAG_TO_MUTATE MINUTES_TO_FALL_ASLEEP FLAG_TO_MUTATE MINUTES_IN_BED FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_summary_json.py - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id TYPE_EPISODE_ID FLAG_TO_MUTATE DURATION FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE LEVEL FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2, we support both. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], \u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from 
JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API . See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}}","title":"fitbitjson_csv"},{"location":"datastreams/fitbitjson-csv/#fitbitjson_csv","text":"This data stream handles Fitbit sensor data downloaded using the Fitbit Web API and stored in a CSV file. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. 
Once you have your sensor data in a CSV file, RAPIDS can process it. Warning The CSV files have to use , as separator, \\ as escape character (do not escape \" with \"\" ), and wrap any string columns with \" . Example of a valid CSV file \"timestamp\",\"device_id\",\"label\",\"fitbit_id\",\"fitbit_data_type\",\"fitbit_data\" 1587614400000,\"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",\"5S\",\"5ZKN9B\",\"steps\",\"{\\\"activities-steps\\\":[{\\\"dateTime\\\":\\\"2020-04-23\\\",\\\"value\\\":\\\"7881\\\"}]\"","title":"fitbitjson_csv"},{"location":"datastreams/fitbitjson-csv/#container","text":"The container should be a CSV file per Fitbit sensor, each containing all participants\u2019 data. The script to connect and download data from this container is at: src/data/streams/fitbitjson_csv/container.R","title":"Container"},{"location":"datastreams/fitbitjson-csv/#format","text":"The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitjson_csv/format.yaml If you want RAPIDS to process Fitbit sensor data using this stream, you will need to map DEVICE_ID and JSON_FITBIT_COLUMN to your own raw data columns inside each sensor section in format.yaml . FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESOUTOFRANGE FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESFATBURN FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESCARDIO FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESPEAK FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_heartrate_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . 
JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE FLAG_TO_MUTATE HEARTRATE_ZONE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - 
src/data/streams/mutations/fitbit/parse_heartrate_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME FLAG_TO_MUTATE LOCAL_END_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id EFFICIENCY FLAG_TO_MUTATE MINUTES_AFTER_WAKEUP 
FLAG_TO_MUTATE MINUTES_ASLEEP FLAG_TO_MUTATE MINUTES_AWAKE FLAG_TO_MUTATE MINUTES_TO_FALL_ASLEEP FLAG_TO_MUTATE MINUTES_IN_BED FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_summary_json.py - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id TYPE_EPISODE_ID FLAG_TO_MUTATE DURATION FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE LEVEL FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2, we support both. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], \u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from 
JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API . See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}}","title":"Format"},{"location":"datastreams/fitbitjson-mysql/","text":"fitbitjson_mysql \u00b6 This data stream handles Fitbit sensor data downloaded using the Fitbit Web API and stored in a MySQL database. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. 
Once you have your sensor data in a MySQL database, RAPIDS can process it. Container \u00b6 The container should be a MySQL database with a table per sensor, each containing all participants\u2019 data. The script to connect and download data from this container is at: src/data/streams/fitbitjson_mysql/container.R Format \u00b6 The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitjson_csv/format.yaml If you want RAPIDS to process Fitbit sensor data using this stream, you will need to map DEVICE_ID and JSON_FITBIT_COLUMN to your own raw data columns inside each sensor section in format.yaml . FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESOUTOFRANGE FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESFATBURN FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESCARDIO FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESPEAK FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_heartrate_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE FLAG_TO_MUTATE HEARTRATE_ZONE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - 
src/data/streams/mutations/fitbit/parse_heartrate_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME FLAG_TO_MUTATE LOCAL_END_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id EFFICIENCY FLAG_TO_MUTATE MINUTES_AFTER_WAKEUP 
FLAG_TO_MUTATE MINUTES_ASLEEP FLAG_TO_MUTATE MINUTES_AWAKE FLAG_TO_MUTATE MINUTES_TO_FALL_ASLEEP FLAG_TO_MUTATE MINUTES_IN_BED FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_summary_json.py - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id TYPE_EPISODE_ID FLAG_TO_MUTATE DURATION FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE LEVEL FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2, we support both. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], \u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from 
JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API . See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}}","title":"fitbitjson_mysql"},{"location":"datastreams/fitbitjson-mysql/#fitbitjson_mysql","text":"This data stream handles Fitbit sensor data downloaded using the Fitbit Web API and stored in a MySQL database. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. 
Once you have your sensor data in a MySQL database, RAPIDS can process it.","title":"fitbitjson_mysql"},{"location":"datastreams/fitbitjson-mysql/#container","text":"The container should be a MySQL database with a table per sensor, each containing all participants\u2019 data. The script to connect and download data from this container is at: src/data/streams/fitbitjson_mysql/container.R","title":"Container"},{"location":"datastreams/fitbitjson-mysql/#format","text":"The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitjson_csv/format.yaml If you want RAPIDS to process Fitbit sensor data using this stream, you will need to map DEVICE_ID and JSON_FITBIT_COLUMN to your own raw data columns inside each sensor section in format.yaml . FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESOUTOFRANGE FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESFATBURN FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESCARDIO FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESPEAK FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_heartrate_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE FLAG_TO_MUTATE HEARTRATE_ZONE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - 
src/data/streams/mutations/fitbit/parse_heartrate_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME FLAG_TO_MUTATE LOCAL_END_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id EFFICIENCY FLAG_TO_MUTATE MINUTES_AFTER_WAKEUP 
FLAG_TO_MUTATE MINUTES_ASLEEP FLAG_TO_MUTATE MINUTES_AWAKE FLAG_TO_MUTATE MINUTES_TO_FALL_ASLEEP FLAG_TO_MUTATE MINUTES_IN_BED FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_summary_json.py - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id TYPE_EPISODE_ID FLAG_TO_MUTATE DURATION FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE LEVEL FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2, we support both. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], \u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from 
JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API . See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}}","title":"Format"},{"location":"datastreams/fitbitparsed-csv/","text":"fitbitparsed_csv \u00b6 This data stream handles Fitbit sensor data downloaded using the Fitbit Web API , parsed , and stored in a CSV file. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. 
Once you have your parsed sensor data in a CSV file, RAPIDS can process it. What is the difference between JSON and plain data streams Most people will only need fitbitjson_* because they downloaded and stored their data directly from Fitbit\u2019s API. However, if, for some reason, you don\u2019t have access to that JSON data and instead only have the parsed data (columns and rows), you can use this data stream. Warning The CSV files have to use , as separator, \\ as escape character (do not escape \" with \"\" ), and wrap any string columns with \" . Example of a valid CSV file \"device_id\",\"heartrate\",\"heartrate_zone\",\"local_date_time\",\"timestamp\" \"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",69,\"outofrange\",\"2020-04-23 00:00:00\",0 \"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",69,\"outofrange\",\"2020-04-23 00:01:00\",0 \"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",67,\"outofrange\",\"2020-04-23 00:02:00\",0 \"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",69,\"outofrange\",\"2020-04-23 00:03:00\",0 Container \u00b6 The container should be a CSV file per sensor, each containing all participants\u2019 data. The script to connect and download data from this container is at: src/data/streams/fitbitparsed_csv/container.R Format \u00b6 The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitparsed_mysql/format.yaml If you want to use this stream with your data, modify every sensor in format.yaml to map all columns except TIMESTAMP in [RAPIDS_COLUMN_MAPPINGS] to your raw data column names. All columns are mandatory; however, all except device_id and local_date_time can be empty if you don\u2019t have that data. Just have in mind that some features will be empty if some of these columns are empty. 
FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR heartrate_daily_restinghr HEARTRATE_DAILY_CALORIESOUTOFRANGE heartrate_daily_caloriesoutofrange HEARTRATE_DAILY_CALORIESFATBURN heartrate_daily_caloriesfatburn HEARTRATE_DAILY_CALORIESCARDIO heartrate_daily_caloriescardio HEARTRATE_DAILY_CALORIESPEAK heartrate_daily_caloriespeak MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0 FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE heartrate HEARTRATE_ZONE heartrate_zone MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME local_start_date_time LOCAL_END_DATE_TIME local_end_date_time DEVICE_ID device_id EFFICIENCY efficiency MINUTES_AFTER_WAKEUP minutes_after_wakeup MINUTES_ASLEEP minutes_asleep MINUTES_AWAKE minutes_awake MINUTES_TO_FALL_ASLEEP minutes_to_fall_asleep MINUTES_IN_BED minutes_in_bed IS_MAIN_SLEEP is_main_sleep TYPE type MUTATION COLUMN_MAPPINGS (None) SCRIPTS - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. 
Example of the expected raw data device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id TYPE_EPISODE_ID type_episode_id DURATION duration IS_MAIN_SLEEP is_main_sleep TYPE type LEVEL level MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2, we support both. 
Example of the expected raw data device_id type_episode_id local_date_time duration level is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:36:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:37:30 660 asleep 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:48:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:46:30 420 light 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:53:30 1230 deep 1 stages FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998 FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0","title":"fitbitparsed_csv"},{"location":"datastreams/fitbitparsed-csv/#fitbitparsed_csv","text":"This data stream handles Fitbit sensor data downloaded using the Fitbit Web API , parsed , and stored in a CSV file. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. Once you have your parsed sensor data in a CSV file, RAPIDS can process it. What is the difference between JSON and plain data streams Most people will only need fitbitjson_* because they downloaded and stored their data directly from Fitbit\u2019s API. However, if, for some reason, you don\u2019t have access to that JSON data and instead only have the parsed data (columns and rows), you can use this data stream. Warning The CSV files have to use , as separator, \\ as escape character (do not escape \" with \"\" ), and wrap any string columns with \" . Example of a valid CSV file \"device_id\",\"heartrate\",\"heartrate_zone\",\"local_date_time\",\"timestamp\" \"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",69,\"outofrange\",\"2020-04-23 00:00:00\",0 \"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",69,\"outofrange\",\"2020-04-23 00:01:00\",0 \"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",67,\"outofrange\",\"2020-04-23 00:02:00\",0 \"a748ee1a-1d0b-4ae9-9074-279a2b6ba524\",69,\"outofrange\",\"2020-04-23 00:03:00\",0","title":"fitbitparsed_csv"},{"location":"datastreams/fitbitparsed-csv/#container","text":"The container should be a CSV file per sensor, each containing all participants\u2019 data. 
The script to connect and download data from this container is at: src/data/streams/fitbitparsed_csv/container.R","title":"Container"},{"location":"datastreams/fitbitparsed-csv/#format","text":"The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitparsed_mysql/format.yaml If you want to use this stream with your data, modify every sensor in format.yaml to map all columns except TIMESTAMP in [RAPIDS_COLUMN_MAPPINGS] to your raw data column names. All columns are mandatory; however, all except device_id and local_date_time can be empty if you don\u2019t have that data. Just have in mind that some features will be empty if some of these columns are empty. FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR heartrate_daily_restinghr HEARTRATE_DAILY_CALORIESOUTOFRANGE heartrate_daily_caloriesoutofrange HEARTRATE_DAILY_CALORIESFATBURN heartrate_daily_caloriesfatburn HEARTRATE_DAILY_CALORIESCARDIO heartrate_daily_caloriescardio HEARTRATE_DAILY_CALORIESPEAK heartrate_daily_caloriespeak MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0 FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE heartrate HEARTRATE_ZONE heartrate_zone MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME local_start_date_time LOCAL_END_DATE_TIME local_end_date_time DEVICE_ID device_id EFFICIENCY efficiency MINUTES_AFTER_WAKEUP minutes_after_wakeup MINUTES_ASLEEP minutes_asleep MINUTES_AWAKE minutes_awake MINUTES_TO_FALL_ASLEEP minutes_to_fall_asleep MINUTES_IN_BED minutes_in_bed IS_MAIN_SLEEP is_main_sleep TYPE type MUTATION COLUMN_MAPPINGS (None) SCRIPTS - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. 
Example of the expected raw data device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id TYPE_EPISODE_ID type_episode_id DURATION duration IS_MAIN_SLEEP is_main_sleep TYPE type LEVEL level MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2, we support both. 
Example of the expected raw data device_id type_episode_id local_date_time duration level is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:36:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:37:30 660 asleep 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:48:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:46:30 420 light 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:53:30 1230 deep 1 stages FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998 FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0","title":"Format"},{"location":"datastreams/fitbitparsed-mysql/","text":"fitbitparsed_mysql \u00b6 This data stream handles Fitbit sensor data downloaded using the Fitbit Web API , parsed , and stored in a MySQL database. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. Once you have your parsed sensor data in a MySQL database, RAPIDS can process it. What is the difference between JSON and plain data streams Most people will only need fitbitjson_* because they downloaded and stored their data directly from Fitbit\u2019s API. However, if, for some reason, you don\u2019t have access to that JSON data and instead only have the parsed data (columns and rows), you can use this data stream. Container \u00b6 The container should be a MySQL database with a table per sensor, each containing all participants\u2019 data. The script to connect and download data from this container is at: src/data/streams/fitbitparsed_mysql/container.R Format \u00b6 The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitparsed_mysql/format.yaml If you want to use this stream with your data, modify every sensor in format.yaml to map all columns except TIMESTAMP in [RAPIDS_COLUMN_MAPPINGS] to your raw data column names. All columns are mandatory; however, all except device_id and local_date_time can be empty if you don\u2019t have that data. Just have in mind that some features will be empty if some of these columns are empty. 
FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR heartrate_daily_restinghr HEARTRATE_DAILY_CALORIESOUTOFRANGE heartrate_daily_caloriesoutofrange HEARTRATE_DAILY_CALORIESFATBURN heartrate_daily_caloriesfatburn HEARTRATE_DAILY_CALORIESCARDIO heartrate_daily_caloriescardio HEARTRATE_DAILY_CALORIESPEAK heartrate_daily_caloriespeak MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0 FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE heartrate HEARTRATE_ZONE heartrate_zone MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME local_start_date_time LOCAL_END_DATE_TIME local_end_date_time DEVICE_ID device_id EFFICIENCY efficiency MINUTES_AFTER_WAKEUP minutes_after_wakeup MINUTES_ASLEEP minutes_asleep MINUTES_AWAKE minutes_awake MINUTES_TO_FALL_ASLEEP minutes_to_fall_asleep MINUTES_IN_BED minutes_in_bed IS_MAIN_SLEEP is_main_sleep TYPE type MUTATION COLUMN_MAPPINGS (None) SCRIPTS - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. 
Example of the expected raw data device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id TYPE_EPISODE_ID type_episode_id DURATION duration IS_MAIN_SLEEP is_main_sleep TYPE type LEVEL level MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2, we support both. 
Example of the expected raw data device_id type_episode_id local_date_time duration level is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:36:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:37:30 660 asleep 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:48:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:46:30 420 light 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:53:30 1230 deep 1 stages FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998 FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0","title":"fitbitparsed_mysql"},{"location":"datastreams/fitbitparsed-mysql/#fitbitparsed_mysql","text":"This data stream handles Fitbit sensor data downloaded using the Fitbit Web API , parsed , and stored in a MySQL database. Please note that RAPIDS cannot query the API directly; you need to use other available tools or implement your own. Once you have your parsed sensor data in a MySQL database, RAPIDS can process it. What is the difference between JSON and plain data streams Most people will only need fitbitjson_* because they downloaded and stored their data directly from Fitbit\u2019s API. However, if, for some reason, you don\u2019t have access to that JSON data and instead only have the parsed data (columns and rows), you can use this data stream.","title":"fitbitparsed_mysql"},{"location":"datastreams/fitbitparsed-mysql/#container","text":"The container should be a MySQL database with a table per sensor, each containing all participants\u2019 data. The script to connect and download data from this container is at: src/data/streams/fitbitparsed_mysql/container.R","title":"Container"},{"location":"datastreams/fitbitparsed-mysql/#format","text":"The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitparsed_mysql/format.yaml If you want to use this stream with your data, modify every sensor in format.yaml to map all columns except TIMESTAMP in [RAPIDS_COLUMN_MAPPINGS] to your raw data column names. All columns are mandatory; however, all except device_id and local_date_time can be empty if you don\u2019t have that data. Just have in mind that some features will be empty if some of these columns are empty. 
FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR heartrate_daily_restinghr HEARTRATE_DAILY_CALORIESOUTOFRANGE heartrate_daily_caloriesoutofrange HEARTRATE_DAILY_CALORIESFATBURN heartrate_daily_caloriesfatburn HEARTRATE_DAILY_CALORIESCARDIO heartrate_daily_caloriescardio HEARTRATE_DAILY_CALORIESPEAK heartrate_daily_caloriespeak MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0 FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE heartrate HEARTRATE_ZONE heartrate_zone MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME local_start_date_time LOCAL_END_DATE_TIME local_end_date_time DEVICE_ID device_id EFFICIENCY efficiency MINUTES_AFTER_WAKEUP minutes_after_wakeup MINUTES_ASLEEP minutes_asleep MINUTES_AWAKE minutes_awake MINUTES_TO_FALL_ASLEEP minutes_to_fall_asleep MINUTES_IN_BED minutes_in_bed IS_MAIN_SLEEP is_main_sleep TYPE type MUTATION COLUMN_MAPPINGS (None) SCRIPTS - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. 
Example of the expected raw data device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id TYPE_EPISODE_ID type_episode_id DURATION duration IS_MAIN_SLEEP is_main_sleep TYPE type LEVEL level MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2, we support both. 
Example of the expected raw data device_id type_episode_id local_date_time duration level is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:36:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:37:30 660 asleep 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:48:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:46:30 420 light 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:53:30 1230 deep 1 stages FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998 FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0","title":"Format"},{"location":"datastreams/mandatory-empatica-format/","text":"Mandatory Empatica Format \u00b6 This is a description of the format RAPIDS needs to process data for the following Empatica sensors. EMPATICA_ACCELEROMETER RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_VALUES_0 x axis of acceleration DOUBLE_VALUES_1 y axis of acceleration DOUBLE_VALUES_2 z axis of acceleration EMPATICA_HEARTRATE RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device HEARTRATE Intraday heartrate EMPATICA_TEMPERATURE RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device TEMPERATURE temperature EMPATICA_ELECTRODERMAL_ACTIVITY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device ELECTRODERMAL_ACTIVITY electrical conductance EMPATICA_BLOOD_VOLUME_PULSE RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device BLOOD_VOLUME_PULSE blood volume pulse EMPATICA_INTER_BEAT_INTERVAL RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device INTER_BEAT_INTERVAL inter beat interval EMPATICA_TAGS RAPIDS column 
Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device TAGS tags","title":"Mandatory Empatica Format"},{"location":"datastreams/mandatory-empatica-format/#mandatory-empatica-format","text":"This is a description of the format RAPIDS needs to process data for the following Empatica sensors. EMPATICA_ACCELEROMETER RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_VALUES_0 x axis of acceleration DOUBLE_VALUES_1 y axis of acceleration DOUBLE_VALUES_2 z axis of acceleration EMPATICA_HEARTRATE RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device HEARTRATE Intraday heartrate EMPATICA_TEMPERATURE RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device TEMPERATURE temperature EMPATICA_ELECTRODERMAL_ACTIVITY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device ELECTRODERMAL_ACTIVITY electrical conductance EMPATICA_BLOOD_VOLUME_PULSE RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device BLOOD_VOLUME_PULSE blood volume pulse EMPATICA_INTER_BEAT_INTERVAL RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device INTER_BEAT_INTERVAL inter beat interval EMPATICA_TAGS RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when 
a row of data was logged (automatically created by RAPIDS) DEVICE_ID A string that uniquely identifies a device TAGS tags","title":"Mandatory Empatica Format"},{"location":"datastreams/mandatory-fitbit-format/","text":"Mandatory Fitbit Format \u00b6 This is a description of the format RAPIDS needs to process data for the following Fitbit sensors. FITBIT_HEARTRATE_SUMMARY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss DEVICE_ID A string that uniquely identifies a device HEARTRATE_DAILY_RESTINGHR Daily resting heartrate HEARTRATE_DAILY_CALORIESOUTOFRANGE Calories spent while heartrate was oustide a heartrate zone HEARTRATE_DAILY_CALORIESFATBURN Calories spent while heartrate was inside the fat burn zone HEARTRATE_DAILY_CALORIESCARDIO Calories spent while heartrate was inside the cardio zone HEARTRATE_DAILY_CALORIESPEAK Calories spent while heartrate was inside the peak zone FITBIT_HEARTRATE_INTRADAY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss DEVICE_ID A string that uniquely identifies a device HEARTRATE Intraday heartrate HEARTRATE_ZONE Heartrate zone that HEARTRATE belongs to. 
It is based on the heartrate zone ranges of each device FITBIT_SLEEP_SUMMARY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd 00:00:00 , the date is the same as the start date of a daily sleep episode if its time is after SLEEP_SUMMARY_LAST_NIGHT_END, otherwise it is the day before the start date of that sleep episode LOCAL_START_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss representing the start of a daily sleep episode LOCAL_END_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss representing the end of a daily sleep episode DEVICE_ID A string that uniquely identifies a device EFFICIENCY Sleep efficiency computed by fitbit as time asleep / (total time in bed - time to fall asleep) MINUTES_AFTER_WAKEUP Minutes the participant spent in bed after waking up MINUTES_ASLEEP Minutes the participant was asleep MINUTES_AWAKE Minutes the participant was awake MINUTES_TO_FALL_ASLEEP Minutes the participant spent in bed before falling asleep MINUTES_IN_BED Minutes the participant spent in bed across the sleep episode IS_MAIN_SLEEP 0 if this episode is a nap, or 1 if it is a main sleep episode TYPE stages or classic sleep data FITBIT_SLEEP_INTRADAY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss , this either is a copy of LOCAL_START_DATE_TIME or LOCAL_END_DATE_TIME depending on which column is used to assign an episode to a specific day DEVICE_ID A string that uniquely identifies a device TYPE_EPISODE_ID An id for each unique main or nap episode. 
Main and nap episodes have different levels, each row in this table is one of such levels, so multiple rows can have the same TYPE_EPISODE_ID DURATION Duration of the episode level in minutes IS_MAIN_SLEEP 0 if this episode level belongs to a nap, or 1 if it belongs to a main sleep episode TYPE type of level: stages or classic sleep data LEVEL For stages levels one of wake , deep , light , or rem . For classic levels one of awake , restless , and asleep FITBIT_STEPS_SUMMARY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss DEVICE_ID A string that uniquely identifies a device STEPS Daily step count FITBIT_STEPS_INTRADAY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss DEVICE_ID A string that uniquely identifies a device STEPS Intraday step count (usually every minute)","title":"Mandatory Fitbit Format"},{"location":"datastreams/mandatory-fitbit-format/#mandatory-fitbit-format","text":"This is a description of the format RAPIDS needs to process data for the following Fitbit sensors. 
FITBIT_HEARTRATE_SUMMARY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss DEVICE_ID A string that uniquely identifies a device HEARTRATE_DAILY_RESTINGHR Daily resting heartrate HEARTRATE_DAILY_CALORIESOUTOFRANGE Calories spent while heartrate was oustide a heartrate zone HEARTRATE_DAILY_CALORIESFATBURN Calories spent while heartrate was inside the fat burn zone HEARTRATE_DAILY_CALORIESCARDIO Calories spent while heartrate was inside the cardio zone HEARTRATE_DAILY_CALORIESPEAK Calories spent while heartrate was inside the peak zone FITBIT_HEARTRATE_INTRADAY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss DEVICE_ID A string that uniquely identifies a device HEARTRATE Intraday heartrate HEARTRATE_ZONE Heartrate zone that HEARTRATE belongs to. 
It is based on the heartrate zone ranges of each device FITBIT_SLEEP_SUMMARY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd 00:00:00 , the date is the same as the start date of a daily sleep episode if its time is after SLEEP_SUMMARY_LAST_NIGHT_END, otherwise it is the day before the start date of that sleep episode LOCAL_START_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss representing the start of a daily sleep episode LOCAL_END_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss representing the end of a daily sleep episode DEVICE_ID A string that uniquely identifies a device EFFICIENCY Sleep efficiency computed by fitbit as time asleep / (total time in bed - time to fall asleep) MINUTES_AFTER_WAKEUP Minutes the participant spent in bed after waking up MINUTES_ASLEEP Minutes the participant was asleep MINUTES_AWAKE Minutes the participant was awake MINUTES_TO_FALL_ASLEEP Minutes the participant spent in bed before falling asleep MINUTES_IN_BED Minutes the participant spent in bed across the sleep episode IS_MAIN_SLEEP 0 if this episode is a nap, or 1 if it is a main sleep episode TYPE stages or classic sleep data FITBIT_SLEEP_INTRADAY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss , this either is a copy of LOCAL_START_DATE_TIME or LOCAL_END_DATE_TIME depending on which column is used to assign an episode to a specific day DEVICE_ID A string that uniquely identifies a device TYPE_EPISODE_ID An id for each unique main or nap episode. 
Main and nap episodes have different levels, each row in this table is one of such levels, so multiple rows can have the same TYPE_EPISODE_ID DURATION Duration of the episode level in minutes IS_MAIN_SLEEP 0 if this episode level belongs to a nap, or 1 if it belongs to a main sleep episode TYPE type of level: stages or classic sleep data LEVEL For stages levels one of wake , deep , light , or rem . For classic levels one of awake , restless , and asleep FITBIT_STEPS_SUMMARY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss DEVICE_ID A string that uniquely identifies a device STEPS Daily step count FITBIT_STEPS_INTRADAY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged (automatically created by RAPIDS) LOCAL_DATE_TIME Date time string with format yyyy-mm-dd hh:mm:ss DEVICE_ID A string that uniquely identifies a device STEPS Intraday step count (usually every minute)","title":"Mandatory Fitbit Format"},{"location":"datastreams/mandatory-phone-format/","text":"Mandatory Phone Format \u00b6 This is a description of the format RAPIDS needs to process data for the following PHONE sensors. 
See examples in the CSV files inside rapids_example_csv.zip PHONE_ACCELEROMETER RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_VALUES_0 x axis of acceleration DOUBLE_VALUES_1 y axis of acceleration DOUBLE_VALUES_2 z axis of acceleration PHONE_ACTIVITY_RECOGNITION RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device ACTIVITY_NAME An string that denotes current activity name: in_vehicle , on_bicycle , on_foot , still , unknown , tilting , walking or running ACTIVITY_TYPE An integer (ranged from 0 to 8) that denotes current activity type CONFIDENCE An integer (ranged from 0 to 100) that denotes the prediction accuracy PHONE_APPLICATIONS_CRASHES RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device PACKAGE_NAME Application\u2019s package name APPLICATION_NAME Application\u2019s localized name APPLICATION_VERSION Application\u2019s version code ERROR_SHORT Short description of the error ERROR_LONG More verbose version of the error description ERROR_CONDITION 1 = code error; 2 = non-responsive (ANR error) IS_SYSTEM_APP Device\u2019s pre-installed application PHONE_APPLICATIONS_FOREGROUND RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device PACKAGE_NAME Application\u2019s package name APPLICATION_NAME Application\u2019s localized name IS_SYSTEM_APP Device\u2019s pre-installed application PHONE_APPLICATIONS_NOTIFICATIONS RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device PACKAGE_NAME Application\u2019s package name APPLICATION_NAME Application\u2019s localized name TEXT 
Notification\u2019s header text, not the content SOUND Notification\u2019s sound source (if applicable) VIBRATE Notification\u2019s vibration pattern (if applicable) DEFAULTS If notification was delivered according to device\u2019s default settings FLAGS An integer that denotes Android notification flag PHONE_BATTERY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device BATTERY_STATUS An integer that denotes battery status: 0 or 1 = unknown, 2 = charging, 3 = discharging, 4 = not charging, 5 = full BATTERY_LEVEL An integer that denotes battery level, between 0 and BATTERY_SCALE BATTERY_SCALE An integer that denotes the maximum battery level PHONE_BLUETOOTH RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device BT_ADDRESS MAC address of the device\u2019s Bluetooth sensor BT_NAME User assigned name of the device\u2019s Bluetooth sensor BT_RSSI The RSSI dB to the scanned device PHONE_CALLS RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device CALL_TYPE An integer that denotes call type: 1 = incoming, 2 = outgoing, 3 = missed CALL_DURATION Length of the call session TRACE SHA-1 one-way source/target of the call PHONE_CONVERSATION RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_ENERGY A number that denotes the amplitude of an audio sample (L2-norm of the audio frame) INFERENCE An integer (ranged from 0 to 3) that denotes the type of an audio sample: 0 = silence, 1 = noise, 2 = voice, 3 = unknown DOUBLE_CONVO_START UNIX timestamp (13 digits) of the beginning of a conversation DOUBLE_CONVO_END UNIX timestamp (13 digits) of the end of a conversation PHONE_KEYBOARD RAPIDS column 
Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device PACKAGE_NAME The application\u2019s package name of keyboard interaction BEFORE_TEXT The previous keyboard input (empty if password) CURRENT_TEXT The current keyboard input (empty if password) IS_PASSWORD An integer: 0 = not password; 1 = password PHONE_LIGHT RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_LIGHT_LUX The ambient luminance in lux units ACCURACY An integer that denotes the sensor\u2019s accuracy level: 3 = maximum accuracy, 2 = medium accuracy, 1 = low accuracy PHONE_LOCATIONS RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_LATITUDE The location\u2019s latitude, in degrees DOUBLE_LONGITUDE The location\u2019s longitude, in degrees DOUBLE_BEARING The location\u2019s bearing, in degrees DOUBLE_SPEED The speed if available, in meters/second over ground DOUBLE_ALTITUDE The altitude if available, in meters above sea level PROVIDER A string that denotes the provider: gps , fused or network ACCURACY The estimated location accuracy PHONE_LOG RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device LOG_MESSAGE A string that denotes log message PHONE_MESSAGES RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device MESSAGE_TYPE An integer that denotes message type: 1 = received, 2 = sent TRACE SHA-1 one-way source/target of the message PHONE_SCREEN RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device SCREEN_STATUS An integer that denotes 
screen status: 0 = off, 1 = on, 2 = locked, 3 = unlocked PHONE_WIFI_CONNECTED RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device MAC_ADDRESS Device\u2019s MAC address SSID Currently connected access point network name BSSID Currently connected access point MAC address PHONE_WIFI_VISIBLE RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device SSID Detected access point network name BSSID Detected access point MAC address SECURITY Active security protocols FREQUENCY Wi-Fi band frequency (e.g., 2427, 5180), in Hz RSSI RSSI dB to the scanned device","title":"Mandatory Phone Format"},{"location":"datastreams/mandatory-phone-format/#mandatory-phone-format","text":"This is a description of the format RAPIDS needs to process data for the following PHONE sensors. See examples in the CSV files inside rapids_example_csv.zip PHONE_ACCELEROMETER RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_VALUES_0 x axis of acceleration DOUBLE_VALUES_1 y axis of acceleration DOUBLE_VALUES_2 z axis of acceleration PHONE_ACTIVITY_RECOGNITION RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device ACTIVITY_NAME An string that denotes current activity name: in_vehicle , on_bicycle , on_foot , still , unknown , tilting , walking or running ACTIVITY_TYPE An integer (ranged from 0 to 8) that denotes current activity type CONFIDENCE An integer (ranged from 0 to 100) that denotes the prediction accuracy PHONE_APPLICATIONS_CRASHES RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device PACKAGE_NAME Application\u2019s package 
name APPLICATION_NAME Application\u2019s localized name APPLICATION_VERSION Application\u2019s version code ERROR_SHORT Short description of the error ERROR_LONG More verbose version of the error description ERROR_CONDITION 1 = code error; 2 = non-responsive (ANR error) IS_SYSTEM_APP Device\u2019s pre-installed application PHONE_APPLICATIONS_FOREGROUND RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device PACKAGE_NAME Application\u2019s package name APPLICATION_NAME Application\u2019s localized name IS_SYSTEM_APP Device\u2019s pre-installed application PHONE_APPLICATIONS_NOTIFICATIONS RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device PACKAGE_NAME Application\u2019s package name APPLICATION_NAME Application\u2019s localized name TEXT Notification\u2019s header text, not the content SOUND Notification\u2019s sound source (if applicable) VIBRATE Notification\u2019s vibration pattern (if applicable) DEFAULTS If notification was delivered according to device\u2019s default settings FLAGS An integer that denotes Android notification flag PHONE_BATTERY RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device BATTERY_STATUS An integer that denotes battery status: 0 or 1 = unknown, 2 = charging, 3 = discharging, 4 = not charging, 5 = full BATTERY_LEVEL An integer that denotes battery level, between 0 and BATTERY_SCALE BATTERY_SCALE An integer that denotes the maximum battery level PHONE_BLUETOOTH RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device BT_ADDRESS MAC address of the device\u2019s Bluetooth sensor BT_NAME User assigned name of the device\u2019s Bluetooth sensor BT_RSSI The RSSI dB to the 
scanned device PHONE_CALLS RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device CALL_TYPE An integer that denotes call type: 1 = incoming, 2 = outgoing, 3 = missed CALL_DURATION Length of the call session TRACE SHA-1 one-way source/target of the call PHONE_CONVERSATION RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_ENERGY A number that denotes the amplitude of an audio sample (L2-norm of the audio frame) INFERENCE An integer (ranged from 0 to 3) that denotes the type of an audio sample: 0 = silence, 1 = noise, 2 = voice, 3 = unknown DOUBLE_CONVO_START UNIX timestamp (13 digits) of the beginning of a conversation DOUBLE_CONVO_END UNIX timestamp (13 digits) of the end of a conversation PHONE_KEYBOARD RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device PACKAGE_NAME The application\u2019s package name of keyboard interaction BEFORE_TEXT The previous keyboard input (empty if password) CURRENT_TEXT The current keyboard input (empty if password) IS_PASSWORD An integer: 0 = not password; 1 = password PHONE_LIGHT RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_LIGHT_LUX The ambient luminance in lux units ACCURACY An integer that denotes the sensor\u2019s accuracy level: 3 = maximum accuracy, 2 = medium accuracy, 1 = low accuracy PHONE_LOCATIONS RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device DOUBLE_LATITUDE The location\u2019s latitude, in degrees DOUBLE_LONGITUDE The location\u2019s longitude, in degrees DOUBLE_BEARING The location\u2019s bearing, in degrees DOUBLE_SPEED 
The speed if available, in meters/second over ground DOUBLE_ALTITUDE The altitude if available, in meters above sea level PROVIDER A string that denotes the provider: gps , fused or network ACCURACY The estimated location accuracy PHONE_LOG RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device LOG_MESSAGE A string that denotes log message PHONE_MESSAGES RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device MESSAGE_TYPE An integer that denotes message type: 1 = received, 2 = sent TRACE SHA-1 one-way source/target of the message PHONE_SCREEN RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device SCREEN_STATUS An integer that denotes screen status: 0 = off, 1 = on, 2 = locked, 3 = unlocked PHONE_WIFI_CONNECTED RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device MAC_ADDRESS Device\u2019s MAC address SSID Currently connected access point network name BSSID Currently connected access point MAC address PHONE_WIFI_VISIBLE RAPIDS column Description TIMESTAMP An UNIX timestamp (13 digits) when a row of data was logged DEVICE_ID A string that uniquely identifies a device SSID Detected access point network name BSSID Detected access point MAC address SECURITY Active security protocols FREQUENCY Wi-Fi band frequency (e.g., 2427, 5180), in Hz RSSI RSSI dB to the scanned device","title":"Mandatory Phone Format"},{"location":"developers/documentation/","text":"Documentation \u00b6 We use mkdocs with the material theme to write these docs. Whenever you make any changes, just push them back to the repo and the documentation will be deployed automatically. 
Set up development environment \u00b6 Make sure your conda environment is active pip install mkdocs pip install mkdocs-material Preview \u00b6 Run the following command in RAPIDS root folder and go to http://127.0.0.1:8000 : mkdocs serve File Structure \u00b6 The documentation config file is /mkdocs.yml , if you are adding new .md files to the docs modify the nav attribute at the bottom of that file. You can use the hierarchy there to find all the files that appear in the documentation. Reference \u00b6 Check this page to get familiar with the different visual elements we can use in the docs (admonitions, code blocks, tables, etc.) You can also refer to /docs/setup/installation.md and /docs/setup/configuration.md to see practical examples of these elements. Hint Any links to internal pages should be relative to the current page. For example, any link from this page (documentation) which is inside ./developers should begin with ../ to go one folder level up like: [ mylink ]( ../setup/installation.md ) Extras \u00b6 You can insert emojis using this syntax :[SOURCE]-[ICON_NAME] from the following sources: https://materialdesignicons.com/ https://fontawesome.com/icons/tasks?style=solid https://primer.style/octicons/ You can use this page to create markdown tables more easily","title":"Documentation"},{"location":"developers/documentation/#documentation","text":"We use mkdocs with the material theme to write these docs. 
Whenever you make any changes, just push them back to the repo and the documentation will be deployed automatically.","title":"Documentation"},{"location":"developers/documentation/#set-up-development-environment","text":"Make sure your conda environment is active pip install mkdocs pip install mkdocs-material","title":"Set up development environment"},{"location":"developers/documentation/#preview","text":"Run the following command in RAPIDS root folder and go to http://127.0.0.1:8000 : mkdocs serve","title":"Preview"},{"location":"developers/documentation/#file-structure","text":"The documentation config file is /mkdocs.yml , if you are adding new .md files to the docs modify the nav attribute at the bottom of that file. You can use the hierarchy there to find all the files that appear in the documentation.","title":"File Structure"},{"location":"developers/documentation/#reference","text":"Check this page to get familiar with the different visual elements we can use in the docs (admonitions, code blocks, tables, etc.) You can also refer to /docs/setup/installation.md and /docs/setup/configuration.md to see practical examples of these elements. Hint Any links to internal pages should be relative to the current page. 
For example, any link from this page (documentation) which is inside ./developers should begin with ../ to go one folder level up like: [ mylink ]( ../setup/installation.md )","title":"Reference"},{"location":"developers/documentation/#extras","text":"You can insert emojis using this syntax :[SOURCE]-[ICON_NAME] from the following sources: https://materialdesignicons.com/ https://fontawesome.com/icons/tasks?style=solid https://primer.style/octicons/ You can use this page to create markdown tables more easily","title":"Extras"},{"location":"developers/git-flow/","text":"Git Flow \u00b6 We use the develop/master variation of the OneFlow git flow Add New Features \u00b6 We use feature (topic) branches to implement new features Internal Developer You are an internal developer if you have writing permissions to the repository. Most feature branches are never pushed to the repo, only do so if you expect that its development will take days (to avoid losing your work if you computer is damaged). Otherwise follow the following instructions to locally rebase your feature branch into develop and push those rebased changes online. Starting your feature branch Pull the latest develop git checkout develop git pull Create your feature branch git checkout -b feature/feature1 Add, modify or delete the necessary files to add your new feature Update the change log ( docs/change-log.md ) Stage and commit your changes using VS Code git GUI or the following commands git add modified-file1 modified-file2 git commit -m \"Add my new feature\" # use a concise description Merging back your feature branch If your changes took time to be implemented it is possible that there are new commits in our develop branch, so we need to rebase your feature branch. 
Fetch the latest changes to develop git fetch origin develop Rebase your feature branch git checkout feature/feature1 git rebase -i develop Integrate your new feature to develop git checkout develop git merge --no-ff feature/feature1 # (use the default merge message) git push origin develop git branch -d feature/feature1 External Developer You are an external developer if you do NOT have writing permissions to the repository. Starting your feature branch Fork and clone our repository on Github Switch to the latest develop git checkout develop Create your feature branch git checkout -b feature/external-test Add, modify or delete the necessary files to add your new feature Stage and commit your changes using VS Code git GUI or the following commands git add modified-file1 modified-file2 git commit -m \"Add my new feature\" # use a concise description Merging back your feature branch If your changes took time to be implemented, it is possible that there are new commits in our develop branch, so we need to rebase your feature branch. 
Add our repo as another remote git remote add upstream https://github.com/carissalow/rapids/ Fetch the latest changes to develop git fetch upstream develop Rebase your feature branch git checkout feature/external-test git rebase -i develop Push your feature branch online git push --set-upstream origin feature/external-test Open a pull request to the develop branch using Github\u2019s GUI Release a New Version \u00b6 Pull the latest develop git checkout develop git pull Create a new release branch git describe --abbrev = 0 --tags # Bump the release (0.1.0 to 0.2.0 => NEW_HOTFIX) git checkout -b release/v [ NEW_RELEASE ] develop Add new tag git tag v [ NEW_RELEASE ] Merge and push the release branch git checkout develop git merge release/v [ NEW_RELEASE ] git push --tags origin develop git branch -d release/v [ NEW_RELEASE ] Fast-forward master git checkout master git merge --ff-only develop git push Go to GitHub and create a new release based on the newest tag v[NEW_RELEASE] (remember to add the change log) Release a Hotfix \u00b6 Pull the latest master git checkout master git pull Start a hotfix branch git describe --abbrev = 0 --tags # Bump the hotfix (0.1.0 to 0.1.1 => NEW_HOTFIX) git checkout -b hotfix/v [ NEW_HOTFIX ] master Fix whatever needs to be fixed Update the change log Tag and merge the hotfix git tag v [ NEW_HOTFIX ] git checkout develop git merge hotfix/v [ NEW_HOTFIX ] git push --tags origin develop git branch -d hotfix/v [ NEW_HOTFIX ] Fast-forward master git checkout master git merge --ff-only v[NEW_HOTFIX] git push Go to GitHub and create a new release based on the newest tag v[NEW_HOTFIX] (remember to add the change log)","title":"Git Flow"},{"location":"developers/git-flow/#git-flow","text":"We use the develop/master variation of the OneFlow git flow","title":"Git Flow"},{"location":"developers/git-flow/#add-new-features","text":"We use feature (topic) branches to implement new features Internal Developer You are an internal developer if you 
have writing permissions to the repository. Most feature branches are never pushed to the repo, only do so if you expect that its development will take days (to avoid losing your work if you computer is damaged). Otherwise follow the following instructions to locally rebase your feature branch into develop and push those rebased changes online. Starting your feature branch Pull the latest develop git checkout develop git pull Create your feature branch git checkout -b feature/feature1 Add, modify or delete the necessary files to add your new feature Update the change log ( docs/change-log.md ) Stage and commit your changes using VS Code git GUI or the following commands git add modified-file1 modified-file2 git commit -m \"Add my new feature\" # use a concise description Merging back your feature branch If your changes took time to be implemented it is possible that there are new commits in our develop branch, so we need to rebase your feature branch. Fetch the latest changes to develop git fetch origin develop Rebase your feature branch git checkout feature/feature1 git rebase -i develop Integrate your new feature to develop git checkout develop git merge --no-ff feature/feature1 # (use the default merge message) git push origin develop git branch -d feature/feature1 External Developer You are an external developer if you do NOT have writing permissions to the repository. 
Starting your feature branch Fork and clone our repository on Github Switch to the latest develop git checkout develop Create your feature branch git checkout -b feature/external-test Add, modify or delete the necessary files to add your new feature Stage and commit your changes using VS Code git GUI or the following commands git add modified-file1 modified-file2 git commit -m \"Add my new feature\" # use a concise description Merging back your feature branch If your changes took time to be implemented, it is possible that there are new commits in our develop branch, so we need to rebase your feature branch. Add our repo as another remote git remote add upstream https://github.com/carissalow/rapids/ Fetch the latest changes to develop git fetch upstream develop Rebase your feature branch git checkout feature/external-test git rebase -i develop Push your feature branch online git push --set-upstream origin feature/external-test Open a pull request to the develop branch using Github\u2019s GUI","title":"Add New Features"},{"location":"developers/git-flow/#release-a-new-version","text":"Pull the latest develop git checkout develop git pull Create a new release branch git describe --abbrev = 0 --tags # Bump the release (0.1.0 to 0.2.0 => NEW_HOTFIX) git checkout -b release/v [ NEW_RELEASE ] develop Add new tag git tag v [ NEW_RELEASE ] Merge and push the release branch git checkout develop git merge release/v [ NEW_RELEASE ] git push --tags origin develop git branch -d release/v [ NEW_RELEASE ] Fast-forward master git checkout master git merge --ff-only develop git push Go to GitHub and create a new release based on the newest tag v[NEW_RELEASE] (remember to add the change log)","title":"Release a New Version"},{"location":"developers/git-flow/#release-a-hotfix","text":"Pull the latest master git checkout master git pull Start a hotfix branch git describe --abbrev = 0 --tags # Bump the hotfix (0.1.0 to 0.1.1 => NEW_HOTFIX) git checkout -b hotfix/v [ NEW_HOTFIX ] master 
Fix whatever needs to be fixed Update the change log Tag and merge the hotfix git tag v [ NEW_HOTFIX ] git checkout develop git merge hotfix/v [ NEW_HOTFIX ] git push --tags origin develop git branch -d hotfix/v [ NEW_HOTFIX ] Fast-forward master git checkout master git merge --ff-only v[NEW_HOTFIX] git push Go to GitHub and create a new release based on the newest tag v[NEW_HOTFIX] (remember to add the change log)","title":"Release a Hotfix"},{"location":"developers/remote-support/","text":"Remote Support \u00b6 We use the Live Share extension of Visual Studio Code to debug bugs when sharing data or database credentials is not possible. Install Visual Studio Code Open your RAPIDS root folder in a new VSCode window Open a new terminal in Visual Studio Code Terminal > New terminal Install the Live Share extension pack Press Ctrl + P or Cmd + P and run this command: >live share: start collaboration session Follow the instructions and share the session link you receive","title":"Remote Support"},{"location":"developers/remote-support/#remote-support","text":"We use the Live Share extension of Visual Studio Code to debug bugs when sharing data or database credentials is not possible. Install Visual Studio Code Open your RAPIDS root folder in a new VSCode window Open a new terminal in Visual Studio Code Terminal > New terminal Install the Live Share extension pack Press Ctrl + P or Cmd + P and run this command: >live share: start collaboration session Follow the instructions and share the session link you receive","title":"Remote Support"},{"location":"developers/test-cases/","text":"Test Cases \u00b6 Along with the continued development and the addition of new sensors and features to the RAPIDS pipeline, tests for the currently available sensors and features are being implemented. Since this is a Work In Progress this page will be updated with the list of sensors and features for which testing is available. 
For each of the sensors listed a description of the data used for testing (test cases) are outline. Currently for all intent and testing purposes the tests/data/raw/test01/ contains all the test data files for testing android data formats and tests/data/raw/test02/ contains all the test data files for testing iOS data formats. It follows that the expected (verified output) are contained in the tests/data/processed/test01/ and tests/data/processed/test02/ for Android and iOS respectively. tests/data/raw/test03/ and tests/data/raw/test04/ contain data files for testing empty raw data files for android and iOS respectively. The following is a list of the sensors that testing is currently available. Sensor Provider Periodic Frequency Event Phone Accelerometer Panda N N N Phone Accelerometer RAPIDS N N N Phone Activity Recognition RAPIDS N N N Phone Applications Foreground RAPIDS N N N Phone Battery RAPIDS Y Y N Phone Bluetooth Doryab N N N Phone Bluetooth RAPIDS Y Y Y Phone Calls RAPIDS Y Y N Phone Conversation RAPIDS Y Y N Phone Data Yield RAPIDS N N N Phone Light RAPIDS Y Y N Phone Locations Doryab N N N Phone Locations Barnett N N N Phone Messages RAPIDS Y Y N Phone Screen RAPIDS Y N N Phone WiFi Connected RAPIDS Y Y N Phone WiFi Visible RAPIDS Y Y N Fitbit Calories Intraday RAPIDS Y Y Y Fitbit Data Yield RAPIDS N N N Fitbit Heart Rate Summary RAPIDS N N N Fitbit Heart Rate Intraday RAPIDS N N N Fitbit Sleep Summary RAPIDS N N N Fitbit Sleep Intraday RAPIDS Y Y Y Fitbit Sleep Intraday PRICE Y Y Y Fitbit Steps Summary RAPIDS N N N Fitbit Steps Intraday RAPIDS N N N Messages (SMS) \u00b6 The raw message data file contains data for 2 separate days. The data for the first day contains records 5 records for every epoch . The second day's data contains 6 records for each of only 2 epoch (currently morning and evening ) The raw message data contains records for both message_types (i.e. recieved and sent ) in both days in all epochs. 
The number records with each message_types per epoch is randomly distributed There is at least one records with each message_types per epoch. There is one raw message data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files Calls \u00b6 Due to the difference in the format of the raw call data for iOS and Android the following is the expected results the calls_with_datetime_unified.csv . This would give a better idea of the use cases being tested since the calls_with_datetime_unified.csv would make both the iOS and Android data comparable. The call data would contain data for 2 days. The data for the first day contains 6 records for every epoch . The second day's data contains 6 records for each of only 2 epoch (currently morning and evening ) The call data contains records for all call_types (i.e. incoming , outgoing and missed ) in both days in all epochs. The number records with each of the call_types per epoch is randomly distributed. There is at least one records with each call_types per epoch. There is one call data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files Screen \u00b6 Due to the difference in the format of the raw screen data for iOS and Android the following is the expected results the screen_deltas.csv . This would give a better idea of the use cases being tested since the screen_eltas.csv would make both the iOS and Android data comparable These files are used to calculate the features for the screen sensor The screen delta data file contains data for 1 day. The screen delta data contains 1 record to represent an unlock episode that falls within an epoch for every epoch . The screen delta data contains 1 record to represent an unlock episode that falls across the boundary of 2 epochs. 
Namely the unlock episode starts in one epoch and ends in the next, thus there is a record for unlock episodes that fall across night to morning , morning to afternoon and finally afternoon to night The testing is done for unlock episode_type. There is one screen data file each for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files Battery \u00b6 Due to the difference in the format of the raw battery data for iOS and Android as well as versions of iOS the following is the expected results the battery_deltas.csv . This would give a better idea of the use cases being tested since the battery_deltas.csv would make both the iOS and Android data comparable. These files are used to calculate the features for the battery sensor. The battery delta data file contains data for 1 day. The battery delta data contains 1 record each for a charging and discharging episode that falls within an epoch for every epoch . Thus, for the daily epoch there would be multiple charging and discharging episodes Since either a charging episode or a discharging episode and not both can occur across epochs, in order to test episodes that occur across epochs alternating episodes of charging and discharging episodes that fall across night to morning , morning to afternoon and finally afternoon to night are present in the battery delta data. This starts with a discharging episode that begins in night and end in morning . There is one battery data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files Bluetooth \u00b6 The raw Bluetooth data file contains data for 1 day. The raw Bluetooth data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. 
For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) An option of 5 Bluetooth devices are randomly distributed throughout the data records. There is one raw Bluetooth data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files. WIFI \u00b6 There are 2 data files ( wifi_raw.csv and sensor_wifi_raw.csv ) for each fake participant for each phone platform. The raw WIFI data files contain data for 1 day. The sensor_wifi_raw.csv data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) The wifi_raw.csv data contains 3 records with random timestamps for each epoch to represent visible broadcasting WIFI network. This file is empty for the iOS phone testing data. An option of 10 access point devices is randomly distributed throughout the data records. 5 each for sensor_wifi_raw.csv and wifi_raw.csv . There data files for testing both iOS and Android data formats. There are also additional empty data files for both android and iOS for testing empty data files. Light \u00b6 The raw light data file contains data for 1 day. The raw light data contains 3 or 4 rows of data for each epoch except night . The single row of data for night is for testing features for single values inputs. (Example testing the standard deviation of one input value) Since light is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files. Locations \u00b6 Description The participant\u2019s home location is (latitude=1, longitude=1). 
From Sat 10:56:00 to Sat 11:04:00, the center of the cluster is (latitude=-100, longitude=-100). From Sun 03:30:00 to Sun 03:47:00, the center of the cluster is (latitude=1, longitude=1). Home location is extracted from this period. From Sun 11:30:00 to Sun 11:38:00, the center of the cluster is (latitude=100, longitude=100). Application Foreground \u00b6 The raw application foreground data file contains data for 1 day. The raw application foreground data contains 7 - 9 rows of data for each epoch . The records for each epoch contains apps that are randomly selected from a list of apps that are from the MULTIPLE_CATEGORIES and SINGLE_CATEGORIES (See testing_config.yaml ). There are also records in each epoch that have apps randomly selected from a list of apps that are from the EXCLUDED_CATEGORIES and EXCLUDED_APPS . This is to test that these apps are actually being excluded from the calculations of features. There are also records to test SINGLE_APPS calculations. Since application foreground is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files. Activity Recognition \u00b6 The raw Activity Recognition data file contains data for 1 day. The raw Activity Recognition data each epoch period contains rows that records 2 - 5 different activity_types . The is such that durations of activities can be tested. Additionally, there are records that mimic the duration of an activity over the time boundary of neighboring epochs. (For example, there a set of records that mimic the participant in_vehicle from afternoon into evening ) There is one file each with raw Activity Recognition data for testing both iOS and Android data formats. (plugin_google_activity_recognition_raw.csv for android and plugin_ios_activity_recognition_raw.csv for iOS) There is also an additional empty data file for both android and iOS for testing empty data files. 
Conversation \u00b6 The raw conversation data file contains data for 2 day. The raw conversation data contains records with a sample of both datatypes (i.e. voice/noise = 0 , and conversation = 2 ) as well as rows with for samples of each of the inference values (i.e. silence = 0 , noise = 1 , voice = 2 , and unknown = 3 ) for each epoch . The different datatype and inference records are randomly distributed throughout the epoch . Additionally there are 2 - 5 records for conversations ( datatype = 2, and inference = -1) in each epoch and for each epoch except night, there is a conversation record that has a double_convo_start timestamp that is from the previous epoch . This is to test the calculations of features across epochs . There is a raw conversation data file for both android and iOS platforms ( plugin_studentlife_audio_android_raw.csv and plugin_studentlife_audio_raw.csv respectively). Finally, there are also additional empty data files for both android and iOS for testing empty data files Keyboard \u00b6 The raw keyboard data file contains data for 4 days. The raw keyboard data contains records with difference in timestamp ranging from milliseconds to seconds. With difference in timestamps between consecutive records more than 5 seconds helps us to create separate sessions within the usage of the same app. This helps to verify the case where sessions have to be different. The raw keyboard data contains records where the difference in text is less than 5 seconds which makes it into 1 session but because of difference of app new session starts. This edge case determines the behaviour within particular app and also within 5 seconds. The raw keyboard data also contains the records where length of current_text varies between consecutive rows. This helps us to tests on the cases where input text is entered by auto-suggested or auto-correct operations. 
One three-minute episode with a 1-minute row on Sun 08:59:54.65 and 09:00:00,another on Sun 12:01:02 that are considering a single episode in multi-timezone event segments to showcase how inferring time zone data for Keyboard from phone data can produce inaccurate results around the tz change. This happens because the device was on LA time until 11:59 and switched to NY time at 12pm, in terms of actual time 09 am LA and 12 pm NY represent the same moment in time so 09:00 LA and 12:01 NY are consecutive minutes. Fitbit Calories Intraday \u00b6 Description A five-minute sedentary episode on Fri 11:00:00 A one-minute sedentary episode on Sun 02:00:00. It exists in November but not in February in STZ A five-minute sedentary episode on Fri 11:58:00. It is split within two 30-min segments and the morning A three-minute lightly active episode on Fri 11:10:00, a one-minute at 11:18:00 and a one-minute 11:24:00. These check for start and end times of first/last/longest episode A three-minute fairly active episode on Fri 11:40:00, a one-minute at 11:48:00 and a one-minute 11:54:00. These check for start and end times of first/last/longest episode A three-minute very active episode on Fri 12:10:00, a one-minute at 12:18:00 and a one-minute 12:24:00. These check for start and end times of first/last/longest episode A eight-minute MVPA episode with intertwined fairly and very active rows on Fri 12:30:00 The above episodes contain six higmet (>= 3 MET) episodes and nine lowmet episodes. One two-minute sedentary episode with a 1-minute row on Sun 09:00:00 and another on Sun 12:01:01 that are considering a single episode in multi-timezone event segments to showcase how inferring time zone data for Fitbit from phone data can produce inaccurate results around the tz change. 
This happens because the device was on LA time until 11:59 and switched to NY time at 12pm, in terms of actual time 09 am LA and 12 pm NY represent the same moment in time so 09:00 LA and 12:01 NY are consecutive minutes. A three-minute sedentary episode on Sat 08:59 that will be ignored for multi-timezone event segments. A three-minute sedentary episode on Sat 12:59 of which the first minute will be ignored for multi-timezone event segments since the test segment starts at 13:00 A three-minute sedentary episode on Sat 16:00 A four-minute sedentary episode on Sun 10:01 that will be ignored for Novembers\u2019s multi-timezone event segments since the test segment ends at 10am on that weekend. A three-minute very active episode on Sat 16:03. This episode and the one at 16:00 are counted as one for lowmet episodes Checklist time segment single tz multi tz platform 30min OK OK fitbit morning OK OK fitbit daily OK OK fitbit threeday OK OK fitbit weekend OK OK fitbit beforeMarchEvent OK OK fitbit beforeNovemberEvent OK OK fitbit Fitbit Sleep Summary \u00b6 Description A main sleep episode that starts on Fri 20:00:00 and ends on Sat 02:00:00. This episode starts after 11am (Last Night End) which will be considered as today\u2019s (Fri) data. A nap that starts on Sat 04:00:00 and ends on Sat 06:00:00. This episode starts before 11am (Last Night End) which will be considered as yesterday\u2019s (Fri) data. A nap that starts on Sat 13:00:00 and ends on Sat 15:00:00. This episode starts after 11am (Last Night End) which will be considered as today\u2019s (Sat) data. A main sleep that starts on Sun 01:00:00 and ends on Sun 12:00:00. This episode starts before 11am (Last Night End) which will be considered as yesterday\u2019s (Sat) data. A main sleep that starts on Sun 23:00:00 and ends on Mon 07:00:00. This episode starts after 11am (Last Night End) which will be considered as today\u2019s (Sun) data. Any segment shorter than one day will be ignored for sleep RAPIDS features. 
Checklist time segment single tz multi tz platform 30min OK OK fitbit morning OK OK fitbit daily OK OK fitbit threeday OK OK fitbit weekend OK OK fitbit beforeMarchEvent OK OK fitbit beforeNovemberEvent OK OK fitbit Fitbit Sleep Intraday \u00b6 Description A five-minute main sleep episode with asleep-classic level on Fri 11:00:00. An eight-hour main sleep episode on Fri 17:00:00. It is split into 2 parts for daily segment: a seven-hour sleep episode on Fri 17:00:00 and an one-hour sleep episode on Sat 00:00:00. A two-hour nap on Sat 01:00:00 that will be ignored for main sleep features. An one-hour nap on Sat 13:00:00 that will be ignored for main sleep features. An eight-hour main sleep episode on Sat 22:00:00. This episode ends on Sun 08:00:00 (NY) for March and Sun 06:00:00 (NY) for Novembers due to daylight savings. It will be considered for beforeMarchEvent segment and ignored for beforeNovemberEvent segment. A nine-hour main sleep episode on Sun 11:00:00. Start time will be assigned as NY time zone and converted to 14:00:00. A seven-hour main sleep episode on Mon 06:00:00. This episode will be split into two parts: a five-hour sleep episode on Mon 06:00:00 and a two-hour sleep episode on Mon 11:00:00. The first part will be discarded as it is before 11am (Last Night End) Any segment shorter than one day will be ignored for sleep PRICE features. Checklist time segment single tz multi tz platform 30min OK OK fitbit morning OK OK fitbit daily OK OK fitbit threeday OK OK fitbit weekend OK OK fitbit beforeMarchEvent OK OK fitbit beforeNovemberEvent OK OK fitbit","title":"Test cases"},{"location":"developers/test-cases/#test-cases","text":"Along with the continued development and the addition of new sensors and features to the RAPIDS pipeline, tests for the currently available sensors and features are being implemented. Since this is a Work In Progress this page will be updated with the list of sensors and features for which testing is available. 
For each of the sensors listed a description of the data used for testing (test cases) are outline. Currently for all intent and testing purposes the tests/data/raw/test01/ contains all the test data files for testing android data formats and tests/data/raw/test02/ contains all the test data files for testing iOS data formats. It follows that the expected (verified output) are contained in the tests/data/processed/test01/ and tests/data/processed/test02/ for Android and iOS respectively. tests/data/raw/test03/ and tests/data/raw/test04/ contain data files for testing empty raw data files for android and iOS respectively. The following is a list of the sensors that testing is currently available. Sensor Provider Periodic Frequency Event Phone Accelerometer Panda N N N Phone Accelerometer RAPIDS N N N Phone Activity Recognition RAPIDS N N N Phone Applications Foreground RAPIDS N N N Phone Battery RAPIDS Y Y N Phone Bluetooth Doryab N N N Phone Bluetooth RAPIDS Y Y Y Phone Calls RAPIDS Y Y N Phone Conversation RAPIDS Y Y N Phone Data Yield RAPIDS N N N Phone Light RAPIDS Y Y N Phone Locations Doryab N N N Phone Locations Barnett N N N Phone Messages RAPIDS Y Y N Phone Screen RAPIDS Y N N Phone WiFi Connected RAPIDS Y Y N Phone WiFi Visible RAPIDS Y Y N Fitbit Calories Intraday RAPIDS Y Y Y Fitbit Data Yield RAPIDS N N N Fitbit Heart Rate Summary RAPIDS N N N Fitbit Heart Rate Intraday RAPIDS N N N Fitbit Sleep Summary RAPIDS N N N Fitbit Sleep Intraday RAPIDS Y Y Y Fitbit Sleep Intraday PRICE Y Y Y Fitbit Steps Summary RAPIDS N N N Fitbit Steps Intraday RAPIDS N N N","title":"Test Cases"},{"location":"developers/test-cases/#messages-sms","text":"The raw message data file contains data for 2 separate days. The data for the first day contains records 5 records for every epoch . The second day's data contains 6 records for each of only 2 epoch (currently morning and evening ) The raw message data contains records for both message_types (i.e. 
recieved and sent ) in both days in all epochs. The number records with each message_types per epoch is randomly distributed There is at least one records with each message_types per epoch. There is one raw message data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Messages (SMS)"},{"location":"developers/test-cases/#calls","text":"Due to the difference in the format of the raw call data for iOS and Android the following is the expected results the calls_with_datetime_unified.csv . This would give a better idea of the use cases being tested since the calls_with_datetime_unified.csv would make both the iOS and Android data comparable. The call data would contain data for 2 days. The data for the first day contains 6 records for every epoch . The second day's data contains 6 records for each of only 2 epoch (currently morning and evening ) The call data contains records for all call_types (i.e. incoming , outgoing and missed ) in both days in all epochs. The number records with each of the call_types per epoch is randomly distributed. There is at least one records with each call_types per epoch. There is one call data file each, as described above, for testing both iOS and Android data. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Calls"},{"location":"developers/test-cases/#screen","text":"Due to the difference in the format of the raw screen data for iOS and Android the following is the expected results the screen_deltas.csv . This would give a better idea of the use cases being tested since the screen_eltas.csv would make both the iOS and Android data comparable These files are used to calculate the features for the screen sensor The screen delta data file contains data for 1 day. 
The screen delta data contains 1 record to represent an unlock episode that falls within an epoch for every epoch . The screen delta data contains 1 record to represent an unlock episode that falls across the boundary of 2 epochs. Namely the unlock episode starts in one epoch and ends in the next, thus there is a record for unlock episodes that fall across night to morning , morning to afternoon and finally afternoon to night The testing is done for unlock episode_type. There is one screen data file each for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files","title":"Screen"},{"location":"developers/test-cases/#battery","text":"Due to the difference in the format of the raw battery data for iOS and Android as well as versions of iOS the following is the expected results the battery_deltas.csv . This would give a better idea of the use cases being tested since the battery_deltas.csv would make both the iOS and Android data comparable. These files are used to calculate the features for the battery sensor. The battery delta data file contains data for 1 day. The battery delta data contains 1 record each for a charging and discharging episode that falls within an epoch for every epoch . Thus, for the daily epoch there would be multiple charging and discharging episodes Since either a charging episode or a discharging episode and not both can occur across epochs, in order to test episodes that occur across epochs alternating episodes of charging and discharging episodes that fall across night to morning , morning to afternoon and finally afternoon to night are present in the battery delta data. This starts with a discharging episode that begins in night and end in morning . There is one battery data file each, for testing both iOS and Android data formats. 
There is also an additional empty data file for both android and iOS for testing empty data files","title":"Battery"},{"location":"developers/test-cases/#bluetooth","text":"The raw Bluetooth data file contains data for 1 day. The raw Bluetooth data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) An option of 5 Bluetooth devices are randomly distributed throughout the data records. There is one raw Bluetooth data file each, for testing both iOS and Android data formats. There is also an additional empty data file for both android and iOS for testing empty data files.","title":"Bluetooth"},{"location":"developers/test-cases/#wifi","text":"There are 2 data files ( wifi_raw.csv and sensor_wifi_raw.csv ) for each fake participant for each phone platform. The raw WIFI data files contain data for 1 day. The sensor_wifi_raw.csv data contains at least 2 records for each epoch . Each epoch has a record with a timestamp for the beginning boundary for that epoch and a record with a timestamp for the ending boundary for that epoch . (e.g. For the morning epoch there is a record with a timestamp for 6:00AM and another record with a timestamp for 11:59:59AM . These are to test edge cases) The wifi_raw.csv data contains 3 records with random timestamps for each epoch to represent visible broadcasting WIFI network. This file is empty for the iOS phone testing data. An option of 10 access point devices is randomly distributed throughout the data records. 5 each for sensor_wifi_raw.csv and wifi_raw.csv . There data files for testing both iOS and Android data formats. 
There are also additional empty data files for both android and iOS for testing empty data files.","title":"WIFI"},{"location":"developers/test-cases/#light","text":"The raw light data file contains data for 1 day. The raw light data contains 3 or 4 rows of data for each epoch except night . The single row of data for night is for testing features for single values inputs. (Example testing the standard deviation of one input value) Since light is only available for Android there is only one file that contains data for Android. All other files (i.e. for iPhone) are empty data files.","title":"Light"},{"location":"developers/test-cases/#locations","text":"Description The participant\u2019s home location is (latitude=1, longitude=1). From Sat 10:56:00 to Sat 11:04:00, the center of the cluster is (latitude=-100, longitude=-100). From Sun 03:30:00 to Sun 03:47:00, the center of the cluster is (latitude=1, longitude=1). Home location is extracted from this period. From Sun 11:30:00 to Sun 11:38:00, the center of the cluster is (latitude=100, longitude=100).","title":"Locations"},{"location":"developers/test-cases/#application-foreground","text":"The raw application foreground data file contains data for 1 day. The raw application foreground data contains 7 - 9 rows of data for each epoch . The records for each epoch contains apps that are randomly selected from a list of apps that are from the MULTIPLE_CATEGORIES and SINGLE_CATEGORIES (See testing_config.yaml ). There are also records in each epoch that have apps randomly selected from a list of apps that are from the EXCLUDED_CATEGORIES and EXCLUDED_APPS . This is to test that these apps are actually being excluded from the calculations of features. There are also records to test SINGLE_APPS calculations. Since application foreground is only available for Android there is only one file that contains data for Android. All other files (i.e. 
for iPhone) are empty data files.","title":"Application Foreground"},{"location":"developers/test-cases/#activity-recognition","text":"The raw Activity Recognition data file contains data for 1 day. The raw Activity Recognition data each epoch period contains rows that records 2 - 5 different activity_types . The is such that durations of activities can be tested. Additionally, there are records that mimic the duration of an activity over the time boundary of neighboring epochs. (For example, there a set of records that mimic the participant in_vehicle from afternoon into evening ) There is one file each with raw Activity Recognition data for testing both iOS and Android data formats. (plugin_google_activity_recognition_raw.csv for android and plugin_ios_activity_recognition_raw.csv for iOS) There is also an additional empty data file for both android and iOS for testing empty data files.","title":"Activity Recognition"},{"location":"developers/test-cases/#conversation","text":"The raw conversation data file contains data for 2 day. The raw conversation data contains records with a sample of both datatypes (i.e. voice/noise = 0 , and conversation = 2 ) as well as rows with for samples of each of the inference values (i.e. silence = 0 , noise = 1 , voice = 2 , and unknown = 3 ) for each epoch . The different datatype and inference records are randomly distributed throughout the epoch . Additionally there are 2 - 5 records for conversations ( datatype = 2, and inference = -1) in each epoch and for each epoch except night, there is a conversation record that has a double_convo_start timestamp that is from the previous epoch . This is to test the calculations of features across epochs . There is a raw conversation data file for both android and iOS platforms ( plugin_studentlife_audio_android_raw.csv and plugin_studentlife_audio_raw.csv respectively). 
Finally, there are also additional empty data files for both android and iOS for testing empty data files","title":"Conversation"},{"location":"developers/test-cases/#keyboard","text":"The raw keyboard data file contains data for 4 days. The raw keyboard data contains records with difference in timestamp ranging from milliseconds to seconds. With difference in timestamps between consecutive records more than 5 seconds helps us to create separate sessions within the usage of the same app. This helps to verify the case where sessions have to be different. The raw keyboard data contains records where the difference in text is less than 5 seconds which makes it into 1 session but because of difference of app new session starts. This edge case determines the behaviour within particular app and also within 5 seconds. The raw keyboard data also contains the records where length of current_text varies between consecutive rows. This helps us to tests on the cases where input text is entered by auto-suggested or auto-correct operations. One three-minute episode with a 1-minute row on Sun 08:59:54.65 and 09:00:00,another on Sun 12:01:02 that are considering a single episode in multi-timezone event segments to showcase how inferring time zone data for Keyboard from phone data can produce inaccurate results around the tz change. This happens because the device was on LA time until 11:59 and switched to NY time at 12pm, in terms of actual time 09 am LA and 12 pm NY represent the same moment in time so 09:00 LA and 12:01 NY are consecutive minutes.","title":"Keyboard"},{"location":"developers/test-cases/#fitbit-calories-intraday","text":"Description A five-minute sedentary episode on Fri 11:00:00 A one-minute sedentary episode on Sun 02:00:00. It exists in November but not in February in STZ A five-minute sedentary episode on Fri 11:58:00. 
It is split within two 30-min segments and the morning A three-minute lightly active episode on Fri 11:10:00, a one-minute at 11:18:00 and a one-minute 11:24:00. These check for start and end times of first/last/longest episode A three-minute fairly active episode on Fri 11:40:00, a one-minute at 11:48:00 and a one-minute 11:54:00. These check for start and end times of first/last/longest episode A three-minute very active episode on Fri 12:10:00, a one-minute at 12:18:00 and a one-minute 12:24:00. These check for start and end times of first/last/longest episode A eight-minute MVPA episode with intertwined fairly and very active rows on Fri 12:30:00 The above episodes contain six higmet (>= 3 MET) episodes and nine lowmet episodes. One two-minute sedentary episode with a 1-minute row on Sun 09:00:00 and another on Sun 12:01:01 that are considering a single episode in multi-timezone event segments to showcase how inferring time zone data for Fitbit from phone data can produce inaccurate results around the tz change. This happens because the device was on LA time until 11:59 and switched to NY time at 12pm, in terms of actual time 09 am LA and 12 pm NY represent the same moment in time so 09:00 LA and 12:01 NY are consecutive minutes. A three-minute sedentary episode on Sat 08:59 that will be ignored for multi-timezone event segments. A three-minute sedentary episode on Sat 12:59 of which the first minute will be ignored for multi-timezone event segments since the test segment starts at 13:00 A three-minute sedentary episode on Sat 16:00 A four-minute sedentary episode on Sun 10:01 that will be ignored for Novembers\u2019s multi-timezone event segments since the test segment ends at 10am on that weekend. A three-minute very active episode on Sat 16:03. 
This episode and the one at 16:00 are counted as one for lowmet episodes Checklist time segment single tz multi tz platform 30min OK OK fitbit morning OK OK fitbit daily OK OK fitbit threeday OK OK fitbit weekend OK OK fitbit beforeMarchEvent OK OK fitbit beforeNovemberEvent OK OK fitbit","title":"Fitbit Calories Intraday"},{"location":"developers/test-cases/#fitbit-sleep-summary","text":"Description A main sleep episode that starts on Fri 20:00:00 and ends on Sat 02:00:00. This episode starts after 11am (Last Night End) which will be considered as today\u2019s (Fri) data. A nap that starts on Sat 04:00:00 and ends on Sat 06:00:00. This episode starts before 11am (Last Night End) which will be considered as yesterday\u2019s (Fri) data. A nap that starts on Sat 13:00:00 and ends on Sat 15:00:00. This episode starts after 11am (Last Night End) which will be considered as today\u2019s (Sat) data. A main sleep that starts on Sun 01:00:00 and ends on Sun 12:00:00. This episode starts before 11am (Last Night End) which will be considered as yesterday\u2019s (Sat) data. A main sleep that starts on Sun 23:00:00 and ends on Mon 07:00:00. This episode starts after 11am (Last Night End) which will be considered as today\u2019s (Sun) data. Any segment shorter than one day will be ignored for sleep RAPIDS features. Checklist time segment single tz multi tz platform 30min OK OK fitbit morning OK OK fitbit daily OK OK fitbit threeday OK OK fitbit weekend OK OK fitbit beforeMarchEvent OK OK fitbit beforeNovemberEvent OK OK fitbit","title":"Fitbit Sleep Summary"},{"location":"developers/test-cases/#fitbit-sleep-intraday","text":"Description A five-minute main sleep episode with asleep-classic level on Fri 11:00:00. An eight-hour main sleep episode on Fri 17:00:00. It is split into 2 parts for daily segment: a seven-hour sleep episode on Fri 17:00:00 and an one-hour sleep episode on Sat 00:00:00. A two-hour nap on Sat 01:00:00 that will be ignored for main sleep features. 
An one-hour nap on Sat 13:00:00 that will be ignored for main sleep features. An eight-hour main sleep episode on Sat 22:00:00. This episode ends on Sun 08:00:00 (NY) for March and Sun 06:00:00 (NY) for Novembers due to daylight savings. It will be considered for beforeMarchEvent segment and ignored for beforeNovemberEvent segment. A nine-hour main sleep episode on Sun 11:00:00. Start time will be assigned as NY time zone and converted to 14:00:00. A seven-hour main sleep episode on Mon 06:00:00. This episode will be split into two parts: a five-hour sleep episode on Mon 06:00:00 and a two-hour sleep episode on Mon 11:00:00. The first part will be discarded as it is before 11am (Last Night End) Any segment shorter than one day will be ignored for sleep PRICE features. Checklist time segment single tz multi tz platform 30min OK OK fitbit morning OK OK fitbit daily OK OK fitbit threeday OK OK fitbit weekend OK OK fitbit beforeMarchEvent OK OK fitbit beforeNovemberEvent OK OK fitbit","title":"Fitbit Sleep Intraday"},{"location":"developers/testing/","text":"Testing \u00b6 The following is a simple guide to run RAPIDS\u2019 tests. All files necessary for testing are stored in the ./tests/ directory Steps for Testing \u00b6 Testing Overview You have to create a single four day test dataset for the sensor you are working on. You will adjust your dataset with tests/script/assign_test_timestamps.py to fit Fri March 6th 2020 - Mon March 9th 2020 and Fri Oct 30th 2020 - Mon Nov 2nd 2020 . We test daylight saving times with these dates. We have one test participant per platform ( pids : android , ios , fitbit , empatica , empty ). The data device_id should be equal to the pid . We will run this test dataset against six test pipelines, three for frequency , periodic , and event time segments in a single time zone, and the same three in multiple time zones. You will have to create your test data to cover as many corner cases as possible. 
These cases depend on the sensor you are working on. The time segments and time zones to be tested are: Frequency 30 minutes ( 30min,30 ) Periodic morning ( morning,06:00:00,5H 59M 59S,every_day,0 ) daily ( daily,00:00:00,23H 59M 59S,every_day,0 ) three-day segments that repeat every day ( threeday,00:00:00,71H 59M 59S,every_day,0 ) three-day segments that repeat every Friday ( weekend,00:00:00,71H 59M 59S,wday,5 ) Event A segment that starts 3 hour before an event (Sat Mar 07 2020 19:00:00 EST) and lasts for 22 hours. Note that the last part of this segment will happen during a daylight saving change on Sunday at 2am when the clock moves forward and the period 2am-3am does not exist. In this case, the segment would start on Sat Mar 07 2020 16:00:00 EST (timestamp: 1583614800000) and end on Sun Mar 08 2020 15:00:00 EST (timestamp: 1583694000000). ( beforeMarchEvent,1583625600000,22H,3H,-1,android ) A segment that starts 3 hour before an event (Sat Oct 31 2020 19:00:00 EST) and lasts for 22 hours. Note that the last part of this segment will happen during a daylight saving change on Sunday at 2am when the clock moves back and the period 1am-2am exists twice. In this case, the segment would start on Sat Oct 31 2020 16:00:00 EST (timestamp: 1604174400000) and end on Sun Nov 01 2020 13:00:00 EST (timestamp: 1604253600000). ( beforeNovemberEvent,1604185200000,22H,3H,-1,android ) Single time zone to test America/New_York Multi time zones to test America/New_York starting at 0 America/Los_Angeles starting at 1583600400000 (Sat Mar 07 2020 12:00:00 EST) America/New_York starting at 1583683200000 (Sun Mar 08 2020 12:00:00 EST) America/Los_Angeles starting at 1604160000000 (Sat Oct 31 2020 12:00:00 EST) America/New_York starting at 1604250000000 (Sun Nov 01 2020 12:00:00 EST) Understanding event segments with multi timezones Document your tests Before you start implementing any test data you need to document your tests. 
The documentation of your tests should be added to docs/developers/test-cases.md under the corresponding sensor. You will need to add two subsections Description and the Checklist The amount of data you need depends on each sensor but you can be efficient by creating data that covers corner cases in more than one time segment. For example, a battery episode from 11am to 1pm, covers the case when an episode has to be split for 30min frequency segments and for morning segments. As a rule of thumb think about corner cases for 30min segments as they will give you the most flexibility. Only add tests for iOS if the raw data format is different than Android\u2019s (for example for screen) Create specific tests for Sunday before and after 02:00. These will test daylight saving switches, in March 02:00 to 02:59 do not exist, and in November 01:00 to 01:59 exist twice (read below how tests/script/assign_test_timestamps.py handles this) Example of Description Description is a list and every item describes the different scenarios your test data is covering. For example, if we are testing PHONE_BATTERY: - We test 24 discharge episodes, 24 charge episodes and 2 episodes with a 0 discharge rate - One episode is shorter than 30 minutes (`start timestamp` to `end timestamp`) - One episode is 120 minutes long from 11:00 to 13:00 (`start timestamp` to `end timestamp`). This one covers the case when an episode has to be chunked for 30min frequency segments and for morning segments - One episode is 60 minutes long from 23:30 to 00:30 (`start timestamp` to `end timestamp`). 
This one covers the case when an episode has to be chunked for 30min frequency segments and for daly segments (overnight) - One 0 discharge rate episode 10 minutes long that happens within a 30-minute segment (10:00 to 10:29) (`start timestamp` to `end timestamp`) - Three discharge episodes that happen between during beforeMarchEvent (start/end timestamps of those discharge episodes) - Three charge episodes that happen between during beforeMarchEvent (start/end timestamps of those charge episodes) - One discharge episode that happen between 00:30 and 04:00 to test for daylight saving times in March and Novemeber 2020. - ... any other test corner cases you can think of Describe your test cases in as much detail as possible so in the future if we find a bug in RAPIDS, we know what test case we did not include and should add. Example of Checklist Checklist is a table where you confirm you have verified the output of your dataset for the different time segments and time zones time segment single tz multi tz platform 30min OK OK android and iOS morning OK OK android and iOS daily OK OK android and iOS threeday OK OK android and iOS weekend OK OK android and iOS beforeMarchEvent OK OK android and iOS beforeNovemberEvent OK OK android and iOS Add raw input data. Add the raw test data to the corresponding sensor CSV file in tests/data/manual/aware_csv/SENSOR_raw.csv . Create the CSV if it does not exist. The test data you create will have the same columns as normal raw data except test_time replaces timestamp . To make your life easier, you can place a test data row in time using the test_time column with the following format: Day HH:MM:SS.XXX , for example Fri 22:54:30.597 . 
You can convert your manual test data to actual raw test data with the following commands: For the selected files: (It could be a single file name or multiple file names separated by whitespace(s)) python tests/scripts/assign_test_timestamps.py -f file_name_1 file_name_2 For all files under the tests/data/manual/aware_csv folder: python tests/scripts/assign_test_timestamps.py -a The script assign_test_timestamps.py converts you test_time column into a timestamp . For example, Fri 22:54:30.597 is converted to 1583553270597 ( Fri Mar 06 2020 22:54:30 GMT-0500 ) and to 1604112870597 ( Fri Oct 30 2020 22:54:30 GMT-0400 ). Note you can include milliseconds. The device_id should be the same as pid . Example of test data you need to create The test_time column will be automatically converted to a timestamp that fits our testing periods in March and November by tests/script/assign_test_timestamps.py test_time,device_id,battery_level,battery_scale,battery_status Fri 01:00:00.000,ios,90,100,4 Fri 01:00:30.500,ios,89,100,4 Fri 01:01:00.000,ios,80,100,4 Fri 01:01:45.500,ios,79,100,4 ... Sat 08:00:00.000,ios,78,100,4 Sat 08:01:00.000,ios,50,100,4 Sat 08:02:00.000,ios,49,100,4 Add expected output data. Add or update the expected output feature file of the participant and sensor you are testing: tests/data/processed/features/ { type_of_time_segment } / { pid } /device_sensor.csv # this example is expected output data for battery tests for periodic segments in a single timezone tests/data/processed/features/stz_periodic/android/phone_sensor.csv # this example is expected output data for battery tests for periodic segments in multi timezones tests/data/processed/features/mtz_periodic/android/phone_sensor.csv Edit the config file(s). Activate the sensor provider you are testing if it isn\u2019t already. 
Set [SENSOR][PROVIDER][COMPUTE] to TRUE in the config.yaml of the time segments and time zones you are testing: - tests/settings/stz_frequency_config.yaml # For single-timezone frequency time segments - tests/settings/stz_periodic_config.yaml # For single-timezone periodic time segments - tests/settings/stz_event_config.yaml # For single-timezone event time segments - tests/settings/mtz_frequency_config.yaml # For multi-timezone frequency time segments - tests/settings/mtz_periodic_config.yaml # For multi-timezone periodic time segments - tests/settings/mtz_event_config.yaml # For multi-timezone event time segments Run the pipeline and tests. You can run all six segment pipelines and their tests bash tests/scripts/run_tests.sh -t all You can run only the pipeline of a specific time segment and its tests bash tests/scripts/run_tests.sh -t stz_frequency -a both # swap stz_frequency for mtz_frequency, stz_event, mtz_event, etc Or, if you are working on your tests and you want to run a pipeline and its tests independently bash tests/scripts/run_tests.sh -t stz_frequency -a run bash tests/scripts/run_tests.sh -t stz_frequency -a test How does the test execution work? This bash script tests/scripts/run_tests.sh executes one or all test pipelines for different time segment types ( frequency , periodic , and events ) and single or multiple timezones. The python script tests/scripts/run_tests.py runs the tests. It parses the involved participants and active sensor providers in the config.yaml file of the time segment type and time zone being tested. We test that the output file we expect exists and that its content matches the expected values. Output Example The following is a snippet of the output you should see after running your test. test_sensors_files_exist ( test_sensor_features.TestSensorFeatures ) ... stz_periodic ok test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ... 
stz_periodic ok test_sensors_files_exist ( test_sensor_features.TestSensorFeatures ) ... stz_frequency ok test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ... stz_frequency FAIL The results above show that for stz_periodic, both test_sensors_files_exist and test_sensors_features_calculations passed. While for stz_frequency, the first test test_sensors_files_exist passed while test_sensors_features_calculations failed. Additionally, you should get the traceback of the failure (not shown here).","title":"Testing"},{"location":"developers/testing/#testing","text":"The following is a simple guide to run RAPIDS\u2019 tests. All files necessary for testing are stored in the ./tests/ directory","title":"Testing"},{"location":"developers/testing/#steps-for-testing","text":"Testing Overview You have to create a single four day test dataset for the sensor you are working on. You will adjust your dataset with tests/scripts/assign_test_timestamps.py to fit Fri March 6th 2020 - Mon March 9th 2020 and Fri Oct 30th 2020 - Mon Nov 2nd 2020 . We test daylight saving times with these dates. We have one test participant per platform ( pids : android , ios , fitbit , empatica , empty ). The data device_id should be equal to the pid . We will run this test dataset against six test pipelines, three for frequency , periodic , and event time segments in a single time zone, and the same three in multiple time zones. You will have to create your test data to cover as many corner cases as possible. These cases depend on the sensor you are working on. 
The time segments and time zones to be tested are: Frequency 30 minutes ( 30min,30 ) Periodic morning ( morning,06:00:00,5H 59M 59S,every_day,0 ) daily ( daily,00:00:00,23H 59M 59S,every_day,0 ) three-day segments that repeat every day ( threeday,00:00:00,71H 59M 59S,every_day,0 ) three-day segments that repeat every Friday ( weekend,00:00:00,71H 59M 59S,wday,5 ) Event A segment that starts 3 hours before an event (Sat Mar 07 2020 19:00:00 EST) and lasts for 22 hours. Note that the last part of this segment will happen during a daylight saving change on Sunday at 2am when the clock moves forward and the period 2am-3am does not exist. In this case, the segment would start on Sat Mar 07 2020 16:00:00 EST (timestamp: 1583614800000) and end on Sun Mar 08 2020 15:00:00 EST (timestamp: 1583694000000). ( beforeMarchEvent,1583625600000,22H,3H,-1,android ) A segment that starts 3 hours before an event (Sat Oct 31 2020 19:00:00 EST) and lasts for 22 hours. Note that the last part of this segment will happen during a daylight saving change on Sunday at 2am when the clock moves back and the period 1am-2am exists twice. In this case, the segment would start on Sat Oct 31 2020 16:00:00 EST (timestamp: 1604174400000) and end on Sun Nov 01 2020 13:00:00 EST (timestamp: 1604253600000). ( beforeNovemberEvent,1604185200000,22H,3H,-1,android ) Single time zone to test America/New_York Multi time zones to test America/New_York starting at 0 America/Los_Angeles starting at 1583600400000 (Sat Mar 07 2020 12:00:00 EST) America/New_York starting at 1583683200000 (Sun Mar 08 2020 12:00:00 EST) America/Los_Angeles starting at 1604160000000 (Sat Oct 31 2020 12:00:00 EST) America/New_York starting at 1604250000000 (Sun Nov 01 2020 12:00:00 EST) Understanding event segments with multi timezones Document your tests Before you start implementing any test data you need to document your tests. 
The documentation of your tests should be added to docs/developers/test-cases.md under the corresponding sensor. You will need to add two subsections Description and the Checklist The amount of data you need depends on each sensor but you can be efficient by creating data that covers corner cases in more than one time segment. For example, a battery episode from 11am to 1pm, covers the case when an episode has to be split for 30min frequency segments and for morning segments. As a rule of thumb think about corner cases for 30min segments as they will give you the most flexibility. Only add tests for iOS if the raw data format is different than Android\u2019s (for example for screen) Create specific tests for Sunday before and after 02:00. These will test daylight saving switches, in March 02:00 to 02:59 do not exist, and in November 01:00 to 01:59 exist twice (read below how tests/scripts/assign_test_timestamps.py handles this) Example of Description Description is a list and every item describes the different scenarios your test data is covering. For example, if we are testing PHONE_BATTERY: - We test 24 discharge episodes, 24 charge episodes and 2 episodes with a 0 discharge rate - One episode is shorter than 30 minutes (`start timestamp` to `end timestamp`) - One episode is 120 minutes long from 11:00 to 13:00 (`start timestamp` to `end timestamp`). This one covers the case when an episode has to be chunked for 30min frequency segments and for morning segments - One episode is 60 minutes long from 23:30 to 00:30 (`start timestamp` to `end timestamp`). 
This one covers the case when an episode has to be chunked for 30min frequency segments and for daily segments (overnight) - One 0 discharge rate episode 10 minutes long that happens within a 30-minute segment (10:00 to 10:29) (`start timestamp` to `end timestamp`) - Three discharge episodes that happen during beforeMarchEvent (start/end timestamps of those discharge episodes) - Three charge episodes that happen during beforeMarchEvent (start/end timestamps of those charge episodes) - One discharge episode that happens between 00:30 and 04:00 to test for daylight saving times in March and November 2020. - ... any other test corner cases you can think of Describe your test cases in as much detail as possible so in the future if we find a bug in RAPIDS, we know what test case we did not include and should add. Example of Checklist Checklist is a table where you confirm you have verified the output of your dataset for the different time segments and time zones time segment single tz multi tz platform 30min OK OK android and iOS morning OK OK android and iOS daily OK OK android and iOS threeday OK OK android and iOS weekend OK OK android and iOS beforeMarchEvent OK OK android and iOS beforeNovemberEvent OK OK android and iOS Add raw input data. Add the raw test data to the corresponding sensor CSV file in tests/data/manual/aware_csv/SENSOR_raw.csv . Create the CSV if it does not exist. The test data you create will have the same columns as normal raw data except test_time replaces timestamp . To make your life easier, you can place a test data row in time using the test_time column with the following format: Day HH:MM:SS.XXX , for example Fri 22:54:30.597 . 
You can convert your manual test data to actual raw test data with the following commands: For the selected files: (It could be a single file name or multiple file names separated by whitespace(s)) python tests/scripts/assign_test_timestamps.py -f file_name_1 file_name_2 For all files under the tests/data/manual/aware_csv folder: python tests/scripts/assign_test_timestamps.py -a The script assign_test_timestamps.py converts your test_time column into a timestamp . For example, Fri 22:54:30.597 is converted to 1583553270597 ( Fri Mar 06 2020 22:54:30 GMT-0500 ) and to 1604112870597 ( Fri Oct 30 2020 22:54:30 GMT-0400 ). Note you can include milliseconds. The device_id should be the same as pid . Example of test data you need to create The test_time column will be automatically converted to a timestamp that fits our testing periods in March and November by tests/scripts/assign_test_timestamps.py test_time,device_id,battery_level,battery_scale,battery_status Fri 01:00:00.000,ios,90,100,4 Fri 01:00:30.500,ios,89,100,4 Fri 01:01:00.000,ios,80,100,4 Fri 01:01:45.500,ios,79,100,4 ... Sat 08:00:00.000,ios,78,100,4 Sat 08:01:00.000,ios,50,100,4 Sat 08:02:00.000,ios,49,100,4 Add expected output data. Add or update the expected output feature file of the participant and sensor you are testing: tests/data/processed/features/ { type_of_time_segment } / { pid } /device_sensor.csv # this example is expected output data for battery tests for periodic segments in a single timezone tests/data/processed/features/stz_periodic/android/phone_sensor.csv # this example is expected output data for battery tests for periodic segments in multi timezones tests/data/processed/features/mtz_periodic/android/phone_sensor.csv Edit the config file(s). Activate the sensor provider you are testing if it isn\u2019t already. 
Set [SENSOR][PROVIDER][COMPUTE] to TRUE in the config.yaml of the time segments and time zones you are testing: - tests/settings/stz_frequency_config.yaml # For single-timezone frequency time segments - tests/settings/stz_periodic_config.yaml # For single-timezone periodic time segments - tests/settings/stz_event_config.yaml # For single-timezone event time segments - tests/settings/mtz_frequency_config.yaml # For multi-timezone frequency time segments - tests/settings/mtz_periodic_config.yaml # For multi-timezone periodic time segments - tests/settings/mtz_event_config.yaml # For multi-timezone event time segments Run the pipeline and tests. You can run all six segment pipelines and their tests bash tests/scripts/run_tests.sh -t all You can run only the pipeline of a specific time segment and its tests bash tests/scripts/run_tests.sh -t stz_frequency -a both # swap stz_frequency for mtz_frequency, stz_event, mtz_event, etc Or, if you are working on your tests and you want to run a pipeline and its tests independently bash tests/scripts/run_tests.sh -t stz_frequency -a run bash tests/scripts/run_tests.sh -t stz_frequency -a test How does the test execution work? This bash script tests/scripts/run_tests.sh executes one or all test pipelines for different time segment types ( frequency , periodic , and events ) and single or multiple timezones. The python script tests/scripts/run_tests.py runs the tests. It parses the involved participants and active sensor providers in the config.yaml file of the time segment type and time zone being tested. We test that the output file we expect exists and that its content matches the expected values. Output Example The following is a snippet of the output you should see after running your test. test_sensors_files_exist ( test_sensor_features.TestSensorFeatures ) ... stz_periodic ok test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ... 
stz_periodic ok test_sensors_files_exist ( test_sensor_features.TestSensorFeatures ) ... stz_frequency ok test_sensors_features_calculations ( test_sensor_features.TestSensorFeatures ) ... stz_frequency FAIL The results above show that for stz_periodic, both test_sensors_files_exist and test_sensors_features_calculations passed. While for stz_frequency, the first test test_sensors_files_exist passed while test_sensors_features_calculations failed. Additionally, you should get the traceback of the failure (not shown here).","title":"Steps for Testing"},{"location":"developers/validation-schema-config/","text":"Validation schema of config.yaml \u00b6 Why do we need to validate the config.yaml ? Most of the key/values in the config.yaml are constrained to a set of possible values or types. For example [TIME_SEGMENTS][TYPE] can only be one of [\"FREQUENCY\", \"PERIODIC\", \"EVENT\"] , and [TIMEZONE] has to be a string. We should show the user an error if that\u2019s not the case. We could validate this in Python or R but since we reuse scripts and keys in multiple places, tracking these validations can be time consuming and get out of control. Thus, we do these validations through a schema and check that schema before RAPIDS starts processing any data so the user can see the error right away. Keep in mind these validations can only cover certain base cases. Some validations that require more complex logic should still be done in the respective script. For example, we can check that a CSV file path actually ends in .csv but we can only check that the file actually exists in a Python script. The structure and values of the config.yaml file are validated using a YAML schema stored in tools/config.schema.yaml . Each key in config.yaml , for example PIDS , has a corresponding entry in the schema where we can validate its type, possible values, required properties, min and max values, among other things. 
The config.yaml is validated against the schema every time RAPIDS runs (see the top of the Snakefile ): validate ( config , \"tools/config.schema.yaml\" ) Structure of the schema \u00b6 The schema has three main sections required , definitions , and properties . All of them are just nested key/value YAML pairs, where the value can be a primitive type ( integer , string , boolean , number ) or can be another key/value pair ( object ). required \u00b6 required lists properties that should be present in the config.yaml . We will almost always add every config.yaml key to this list (meaning that the user cannot delete any of those keys like TIMEZONE or PIDS ). definitions \u00b6 definitions lists key/values that are common to different properties so we can reuse them. You can define a key/value under definitions and use $ref to refer to it in any property . For example, every sensor like [PHONE_ACCELEROMETER] has one or more providers like RAPIDS and PANDA , these providers have some common properties like the COMPUTE flag or the SRC_SCRIPT string. Therefore we define a shared provider \u201ctemplate\u201d that is used by every provider and extended with properties exclusive to each one of them. For example: provider definition (template) The PROVIDER definition will be used later on different properties . PROVIDER : type : object required : [ COMPUTE , SRC_SCRIPT , FEATURES ] properties : COMPUTE : type : boolean FEATURES : type : [ array , object ] SRC_SCRIPT : type : string pattern : \"^.*\\\\.(py|R)$\" provider reusing and extending the template Notice that RAPIDS (a provider) uses and extends the PROVIDER template in this example. The FEATURES key is overriding the FEATURES key from the #/definitions/PROVIDER template but is keeping the validation for COMPUTE , and SRC_SCRIPT . For more details about reusing properties, go to this link PHONE_ACCELEROMETER : type : object # .. 
other properties PROVIDERS : type : [ \"null\" , object ] properties : RAPIDS : allOf : - $ref : \"#/definitions/PROVIDER\" - properties : FEATURES : type : array uniqueItems : True items : type : string enum : [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] properties \u00b6 properties are nested key/values that describe the different components of our config.yaml file. Values can be of one or more primitive types like string , number , array , boolean and null . Values can also be another key/value pair (of type object ) that are similar to a dictionary in Python. For example, the following property validates the PIDS of our config.yaml . It checks that PIDS is an array with unique items of type string . PIDS : type : array uniqueItems : True items : type : string Modifying the schema \u00b6 Validating the config.yaml during development If you updated the schema and want to check the config.yaml is compliant, you can run the command snakemake --list-params-changes . You will see Building DAG of jobs... if there are no problems or an error message otherwise (try setting any COMPUTE flag to a string like test instead of False/True ). You can use this command without having to configure RAPIDS to process any participants or sensors. You can validate different aspects of each key/value in our config.yaml file: number/integer Including min and max values MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS : type : number minimum : 0 maximum : 1 FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD : type : integer exclusiveMinimum : 0 string Including valid values ( enum ) items : type : string enum : [ \"count\" , \"maxlux\" , \"minlux\" , \"avglux\" , \"medianlux\" , \"stdlux\" ] boolean MINUTES_DATA_USED : type : boolean array Including whether or not it should have unique values, the type of the array\u2019s elements ( strings , numbers ) and valid values ( enum ). 
MESSAGES_TYPES : type : array uniqueItems : True items : type : string enum : [ \"received\" , \"sent\" ] object PARENT is an object that has two properties. KID1 is one of those properties that are, in turn, another object that will reuse the \"#/definitions/PROVIDER\" definition AND also include (extend) two extra properties GRAND_KID1 of type array and GRAND_KID2 of type number . KID2 is another property of PARENT of type boolean . The schema validation looks like this PARENT : type : object properties : KID1 : allOf : - $ref : \"#/definitions/PROVIDER\" - properties : GRAND_KID1 : type : array uniqueItems : True GRAND_KID2 : type : number KID2 : type : boolean The config.yaml key that the previous schema validates looks like this: PARENT : KID1 : # These four come from the `PROVIDER` definition (template) COMPUTE : False FEATURES : [ x , y ] # an array SRC_SCRIPT : \"a path to a py or R script\" # These two come from the extension GRAND_KID1 : [ a , b ] # an array GRAND_KID2 : 5.1 # a number KID2 : True # a boolean Verifying the schema is correct \u00b6 We recommend that before you start modifying the schema you modify the config.yaml key that you want to validate with an invalid value. For example, if you want to validate that COMPUTE is boolean, you set COMPUTE: 123 . Then create your validation, run snakemake --list-params-changes and make sure your validation fails (123 is not boolean ), and then set the key to the correct value. In other words, make sure it\u2019s broken first so that you know that your validation works. Warning Be careful . You can check that the schema config.schema.yaml has a valid format by running python tools/check_schema.py . You will see this message if its structure is correct: Schema is OK . However, we don\u2019t have a way to detect typos, for example allOf will work but allOF won\u2019t (capital F ) and it won\u2019t show any error. 
That\u2019s why we recommend to start with an invalid key/value in your config.yaml so that you can be sure the schema validation finds the problem. Useful resources \u00b6 Read the following links to learn more about what we can validate with schemas. They are based on JSON instead of YAML schemas but the same concepts apply. Understanding JSON Schemas Specification of the JSON schema we use","title":"Validation schema of config.yaml"},{"location":"developers/validation-schema-config/#validation-schema-of-configyaml","text":"Why do we need to validate the config.yaml ? Most of the key/values in the config.yaml are constrained to a set of possible values or types. For example [TIME_SEGMENTS][TYPE] can only be one of [\"FREQUENCY\", \"PERIODIC\", \"EVENT\"] , and [TIMEZONE] has to be a string. We should show the user an error if that\u2019s not the case. We could validate this in Python or R but since we reuse scripts and keys in multiple places, tracking these validations can be time consuming and get out of control. Thus, we do these validations through a schema and check that schema before RAPIDS starts processing any data so the user can see the error right away. Keep in mind these validations can only cover certain base cases. Some validations that require more complex logic should still be done in the respective script. For example, we can check that a CSV file path actually ends in .csv but we can only check that the file actually exists in a Python script. The structure and values of the config.yaml file are validated using a YAML schema stored in tools/config.schema.yaml . Each key in config.yaml , for example PIDS , has a corresponding entry in the schema where we can validate its type, possible values, required properties, min and max values, among other things. 
The config.yaml is validated against the schema every time RAPIDS runs (see the top of the Snakefile ): validate ( config , \"tools/config.schema.yaml\" )","title":"Validation schema of config.yaml"},{"location":"developers/validation-schema-config/#structure-of-the-schema","text":"The schema has three main sections required , definitions , and properties . All of them are just nested key/value YAML pairs, where the value can be a primitive type ( integer , string , boolean , number ) or can be another key/value pair ( object ).","title":"Structure of the schema"},{"location":"developers/validation-schema-config/#required","text":"required lists properties that should be present in the config.yaml . We will almost always add every config.yaml key to this list (meaning that the user cannot delete any of those keys like TIMEZONE or PIDS ).","title":"required"},{"location":"developers/validation-schema-config/#definitions","text":"definitions lists key/values that are common to different properties so we can reuse them. You can define a key/value under definitions and use $ref to refer to it in any property . For example, every sensor like [PHONE_ACCELEROMETER] has one or more providers like RAPIDS and PANDA , these providers have some common properties like the COMPUTE flag or the SRC_SCRIPT string. Therefore we define a shared provider \u201ctemplate\u201d that is used by every provider and extended with properties exclusive to each one of them. For example: provider definition (template) The PROVIDER definition will be used later on different properties . PROVIDER : type : object required : [ COMPUTE , SRC_SCRIPT , FEATURES ] properties : COMPUTE : type : boolean FEATURES : type : [ array , object ] SRC_SCRIPT : type : string pattern : \"^.*\\\\.(py|R)$\" provider reusing and extending the template Notice that RAPIDS (a provider) uses and extends the PROVIDER template in this example. 
The FEATURES key is overriding the FEATURES key from the #/definitions/PROVIDER template but is keeping the validation for COMPUTE , and SRC_SCRIPT . For more details about reusing properties, go to this link PHONE_ACCELEROMETER : type : object # .. other properties PROVIDERS : type : [ \"null\" , object ] properties : RAPIDS : allOf : - $ref : \"#/definitions/PROVIDER\" - properties : FEATURES : type : array uniqueItems : True items : type : string enum : [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ]","title":"definitions"},{"location":"developers/validation-schema-config/#properties","text":"properties are nested key/values that describe the different components of our config.yaml file. Values can be of one or more primitive types like string , number , array , boolean and null . Values can also be another key/value pair (of type object ) that are similar to a dictionary in Python. For example, the following property validates the PIDS of our config.yaml . It checks that PIDS is an array with unique items of type string . PIDS : type : array uniqueItems : True items : type : string","title":"properties"},{"location":"developers/validation-schema-config/#modifying-the-schema","text":"Validating the config.yaml during development If you updated the schema and want to check the config.yaml is compliant, you can run the command snakemake --list-params-changes . You will see Building DAG of jobs... if there are no problems or an error message otherwise (try setting any COMPUTE flag to a string like test instead of False/True ). You can use this command without having to configure RAPIDS to process any participants or sensors. 
You can validate different aspects of each key/value in our config.yaml file: number/integer Including min and max values MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS : type : number minimum : 0 maximum : 1 FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD : type : integer exclusiveMinimum : 0 string Including valid values ( enum ) items : type : string enum : [ \"count\" , \"maxlux\" , \"minlux\" , \"avglux\" , \"medianlux\" , \"stdlux\" ] boolean MINUTES_DATA_USED : type : boolean array Including whether or not it should have unique values, the type of the array\u2019s elements ( strings , numbers ) and valid values ( enum ). MESSAGES_TYPES : type : array uniqueItems : True items : type : string enum : [ \"received\" , \"sent\" ] object PARENT is an object that has two properties. KID1 is one of those properties that are, in turn, another object that will reuse the \"#/definitions/PROVIDER\" definition AND also include (extend) two extra properties GRAND_KID1 of type array and GRAND_KID2 of type number . KID2 is another property of PARENT of type boolean . The schema validation looks like this PARENT : type : object properties : KID1 : allOf : - $ref : \"#/definitions/PROVIDER\" - properties : GRAND_KID1 : type : array uniqueItems : True GRAND_KID2 : type : number KID2 : type : boolean The config.yaml key that the previous schema validates looks like this: PARENT : KID1 : # These four come from the `PROVIDER` definition (template) COMPUTE : False FEATURES : [ x , y ] # an array SRC_SCRIPT : \"a path to a py or R script\" # These two come from the extension GRAND_KID1 : [ a , b ] # an array GRAND_KID2 : 5.1 # a number KID2 : True # a boolean","title":"Modifying the schema"},{"location":"developers/validation-schema-config/#verifying-the-schema-is-correct","text":"We recommend that before you start modifying the schema you modify the config.yaml key that you want to validate with an invalid value. 
For example, if you want to validate that COMPUTE is boolean, you set COMPUTE: 123 . Then create your validation, run snakemake --list-params-changes and make sure your validation fails (123 is not boolean ), and then set the key to the correct value. In other words, make sure it\u2019s broken first so that you know that your validation works. Warning Be careful . You can check that the schema config.schema.yaml has a valid format by running python tools/check_schema.py . You will see this message if its structure is correct: Schema is OK . However, we don\u2019t have a way to detect typos, for example allOf will work but allOF won\u2019t (capital F ) and it won\u2019t show any error. That\u2019s why we recommend to start with an invalid key/value in your config.yaml so that you can be sure the schema validation finds the problem.","title":"Verifying the schema is correct"},{"location":"developers/validation-schema-config/#useful-resources","text":"Read the following links to learn more about what we can validate with schemas. They are based on JSON instead of YAML schemas but the same concepts apply. Understanding JSON Schemas Specification of the JSON schema we use","title":"Useful resources"},{"location":"developers/virtual-environments/","text":"Python Virtual Environment \u00b6 Add new packages \u00b6 Try to install any new package using conda install -c CHANNEL PACKAGE_NAME (you can use pip if the package is only available there). Make sure your Python virtual environment is active ( conda activate YOUR_ENV ). 
Remove packages \u00b6 Uninstall packages using the same manager you used to install them conda remove PACKAGE_NAME or pip uninstall PACKAGE_NAME Updating all packages \u00b6 Make sure your Python virtual environment is active ( conda activate YOUR_ENV ), then run conda update --all Update your conda environment.yaml \u00b6 After installing or removing a package you can use the following command in your terminal to update your environment.yaml before publishing your pipeline. Note that we ignore the package version for libfortran and mkl to keep compatibility with Linux: conda env export --no-builds | sed 's/^.*libgfortran.*$/ - libgfortran/' | sed 's/^.*mkl=.*$/ - mkl/' > environment.yml R Virtual Environment \u00b6 Add new packages \u00b6 Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::install(\"PACKAGE_NAME\") Remove packages \u00b6 Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::remove(\"PACKAGE_NAME\") Updating all packages \u00b6 Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::update() Update your R renv.lock \u00b6 After installing or removing a package you can use the following command in your terminal to update your renv.lock before publishing your pipeline. Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::snapshot() (renv will ask you to confirm any updates to this file)","title":"Virtual Environments"},{"location":"developers/virtual-environments/#python-virtual-environment","text":"","title":"Python Virtual Environment"},{"location":"developers/virtual-environments/#add-new-packages","text":"Try to install any new package using conda install -c CHANNEL PACKAGE_NAME (you can use pip if the package is only available there). 
Make sure your Python virtual environment is active ( conda activate YOUR_ENV ).","title":"Add new packages"},{"location":"developers/virtual-environments/#remove-packages","text":"Uninstall packages using the same manager you used to install them conda remove PACKAGE_NAME or pip uninstall PACKAGE_NAME","title":"Remove packages"},{"location":"developers/virtual-environments/#updating-all-packages","text":"Make sure your Python virtual environment is active ( conda activate YOUR_ENV ), then run conda update --all","title":"Updating all packages"},{"location":"developers/virtual-environments/#update-your-conda-environmentyaml","text":"After installing or removing a package you can use the following command in your terminal to update your environment.yaml before publishing your pipeline. Note that we ignore the package version for libfortran and mkl to keep compatibility with Linux: conda env export --no-builds | sed 's/^.*libgfortran.*$/ - libgfortran/' | sed 's/^.*mkl=.*$/ - mkl/' > environment.yml","title":"Update your conda environment.yaml"},{"location":"developers/virtual-environments/#r-virtual-environment","text":"","title":"R Virtual Environment"},{"location":"developers/virtual-environments/#add-new-packages_1","text":"Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::install(\"PACKAGE_NAME\")","title":"Add new packages"},{"location":"developers/virtual-environments/#remove-packages_1","text":"Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::remove(\"PACKAGE_NAME\")","title":"Remove packages"},{"location":"developers/virtual-environments/#updating-all-packages_1","text":"Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::update()","title":"Updating all packages"},{"location":"developers/virtual-environments/#update-your-r-renvlock","text":"After installing or removing a package you 
can use the following command in your terminal to update your renv.lock before publishing your pipeline. Open your terminal and navigate to RAPIDS\u2019 root folder Run R to open an R interactive session Run renv::snapshot() (renv will ask you to confirm any updates to this file)","title":"Update your R renv.lock"},{"location":"features/add-new-features/","text":"Add New Features \u00b6 Hint We recommend reading the Behavioral Features Introduction before reading this page. You can implement new features in Python or R scripts. You won\u2019t have to deal with time zones, dates, times, data cleaning, or preprocessing. The data that RAPIDS pipes to your feature extraction code are ready to process. New Features for Existing Sensors \u00b6 You can add new features to any existing sensors (see list below) by adding a new provider in three steps: Modify the config.yaml file Create your feature provider script Implement your features extraction code As a tutorial, we will add a new provider for PHONE_ACCELEROMETER called VEGA that extracts feature1 , feature2 , feature3 with a Python script that requires a parameter from the user called MY_PARAMETER . 
Existing Sensors An existing sensor of any device with a configuration entry in config.yaml : Smartphone (AWARE) Phone Accelerometer Phone Activity Recognition Phone Applications Crashes Phone Applications Foreground Phone Applications Notifications Phone Battery Phone Bluetooth Phone Calls Phone Conversation Phone Data Yield Phone Keyboard Phone Light Phone Locations Phone Log Phone Messages Phone Screen Phone WiFI Connected Phone WiFI Visible Fitbit Fitbit Data Yield Fitbit Heart Rate Summary Fitbit Heart Rate Intraday Fitbit Sleep Summary Fitbit Sleep Intraday Fitbit Steps Summary Fitbit Steps Intraday Empatica Empatica Accelerometer Empatica Heart Rate Empatica Temperature Empatica Electrodermal Activity Empatica Blood Volume Pulse Empatica Inter Beat Interval Empatica Tags Modify the config.yaml file \u00b6 In this step, you need to add your provider configuration section under the relevant sensor in config.yaml . See our example for our tutorial\u2019s VEGA provider for PHONE_ACCELEROMETER : Example configuration for a new accelerometer provider VEGA PHONE_ACCELEROMETER : CONTAINER : accelerometer PROVIDERS : RAPIDS : # this is a feature provider COMPUTE : False ... PANDA : # this is another feature provider COMPUTE : False ... VEGA : # this is our new feature provider COMPUTE : False FEATURES : [ \"feature1\" , \"feature2\" , \"feature3\" ] MY_PARAMTER : a_string SRC_SCRIPT : src/features/phone_accelerometer/vega/main.py Key Description [COMPUTE] Flag to activate/deactivate your provider [FEATURES] List of features your provider supports. Your provider code should only return the features on this list [MY_PARAMTER] An arbitrary parameter that our example provider VEGA needs. This can be a boolean, integer, float, string, or an array of any of such types. [SRC_SCRIPT] The relative path from RAPIDS\u2019 root folder to a script that computes the features for this provider. It can be implemented in R or Python. 
Create a feature provider script \u00b6 Create your feature Python or R script called main.py or main.R in the correct folder, src/feature/[sensorname]/[providername]/ . RAPIDS automatically loads and executes it based on the config key [SRC_SCRIPT] you added in the last step. For our example, this script is: src/feature/phone_accelerometer/vega/main.py Implement your feature extraction code \u00b6 Every feature script ( main.[py|R] ) needs a [providername]_features function with specific parameters. RAPIDS calls this function with the sensor data ready to process and with other functions and arguments you will need. Python function def [ providername ] _features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): # empty for now return ( your_features_df ) R function [ providername ] _ features <- function ( sensor_data , time_segment , provider ){ # empty for now return ( your_features_df ) } Parameter Description sensor_data_files Path to the CSV file containing the data of a single participant. This data has been cleaned and preprocessed. Your function will be automatically called for each participant in your study (in the [PIDS] array in config.yaml ) time_segment The label of the time segment that should be processed. provider The parameters you configured for your provider in config.yaml will be available in this variable as a dictionary in Python or a list in R. In our example, this dictionary contains {MY_PARAMETER:\"a_string\"} filter_data_by_segment Python only. A function that you will use to filter your data. In R, this function is already available in the environment. *args Python only. Not used for now **kwargs Python only. Not used for now The next step is to implement the code that computes your behavioral features in your provider script\u2019s function. As with any other script, this function can call other auxiliary methods, but in general terms, it should have three stages: 1. 
Read a participant\u2019s data by loading the CSV data stored in the file pointed by sensor_data_files acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) Note that the phone\u2019s battery, screen, and activity recognition data are given as episodes instead of event rows (for example, start and end timestamps of the periods the phone screen was on) 2. Filter your data to process only those rows that belong to time_segment This step is only one line of code, but keep reading to understand why we need it. acc_data = filter_data_by_segment ( acc_data , time_segment ) You should use the filter_data_by_segment() function to process and group those rows that belong to each of the time segments RAPIDS could be configured with . Let\u2019s understand the filter_data_by_segment() function with an example. A RAPIDS user can extract features on any arbitrary time segment . A time segment is a period that has a label and one or more instances. For example, the user (or you) could have requested features on a daily, weekly, and weekend basis for p01 . The labels are arbitrary, and the instances depend on the days a participant was monitored for: the daily segment could be named my_days and if p01 was monitored for 14 days, it would have 14 instances the weekly segment could be named my_weeks and if p01 was monitored for 14 days, it would have 2 instances. the weekend segment could be named my_weekends and if p01 was monitored for 14 days, it would have 2 instances. For this example, RAPIDS will call your provider function three times for p01 , once where time_segment is my_days , once where time_segment is my_weeks , and once where time_segment is my_weekends . In this example, not every row in p01 \u2018s data needs to take part in the feature computation for either segment and the rows need to be grouped differently. 
Thus filter_data_by_segment() comes in handy, it will return a data frame that contains the rows that were logged during a time segment plus an extra column called local_segment . This new column will have as many unique values as time segment instances exist (14, 2, and 2 for our p01 \u2018s my_days , my_weeks , and my_weekends examples). After filtering, you should group the data frame by this column and compute any desired features , for example: acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () The reason RAPIDS does not filter the participant\u2019s data set for you is because your code might need to compute something based on a participant\u2019s complete dataset before computing their features. For example, you might want to identify the number that called a participant the most throughout the study before computing a feature with the number of calls the participant received from that number. 3. Return a data frame with your features After filtering, grouping your data, and computing your features, your provider function should return a data frame that has: One row per time segment instance (e.g., 14 our p01 \u2018s my_days example) The local_segment column added by filter_data_by_segment() One column per feature. The name of your features should only contain letters or numbers ( feature1 ) by convention. RAPIDS automatically adds the correct sensor and provider prefix; in our example, this prefix is phone_accelerometr_vega_ . PHONE_ACCELEROMETER Provider Example For your reference, this our own provider ( RAPIDS ) for PHONE_ACCELEROMETER that computes five acceleration features import pandas as pd import numpy as np def rapids_features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): acc_data = pd . 
read_csv ( sensor_data_files [ \"sensor_data\" ]) requested_features = provider [ \"FEATURES\" ] # name of the features this function can compute base_features_names = [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] # the subset of requested features this function can compute features_to_compute = list ( set ( requested_features ) & set ( base_features_names )) acc_features = pd . DataFrame ( columns = [ \"local_segment\" ] + features_to_compute ) if not acc_data . empty : acc_data = filter_data_by_segment ( acc_data , time_segment ) if not acc_data . empty : acc_features = pd . DataFrame () # get magnitude related features: magnitude = sqrt(x^2+y^2+z^2) magnitude = acc_data . apply ( lambda row : np . sqrt ( row [ \"double_values_0\" ] ** 2 + row [ \"double_values_1\" ] ** 2 + row [ \"double_values_2\" ] ** 2 ), axis = 1 ) acc_data = acc_data . assign ( magnitude = magnitude . values ) if \"maxmagnitude\" in features_to_compute : acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () if \"minmagnitude\" in features_to_compute : acc_features [ \"minmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . min () if \"avgmagnitude\" in features_to_compute : acc_features [ \"avgmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . mean () if \"medianmagnitude\" in features_to_compute : acc_features [ \"medianmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . median () if \"stdmagnitude\" in features_to_compute : acc_features [ \"stdmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . std () acc_features = acc_features . 
reset_index () return acc_features New Features for Non-Existing Sensors \u00b6 If you want to add features for a device or a sensor that we do not support at the moment (those that do not appear in the \"Existing Sensors\" list above), open a new discussion in Github and we can add the necessary code so you can follow the instructions above.","title":"Add New Features"},{"location":"features/add-new-features/#add-new-features","text":"Hint We recommend reading the Behavioral Features Introduction before reading this page. You can implement new features in Python or R scripts. You won\u2019t have to deal with time zones, dates, times, data cleaning, or preprocessing. The data that RAPIDS pipes to your feature extraction code are ready to process.","title":"Add New Features"},{"location":"features/add-new-features/#new-features-for-existing-sensors","text":"You can add new features to any existing sensors (see list below) by adding a new provider in three steps: Modify the config.yaml file Create your feature provider script Implement your features extraction code As a tutorial, we will add a new provider for PHONE_ACCELEROMETER called VEGA that extracts feature1 , feature2 , feature3 with a Python script that requires a parameter from the user called MY_PARAMETER . 
Existing Sensors An existing sensor of any device with a configuration entry in config.yaml : Smartphone (AWARE) Phone Accelerometer Phone Activity Recognition Phone Applications Crashes Phone Applications Foreground Phone Applications Notifications Phone Battery Phone Bluetooth Phone Calls Phone Conversation Phone Data Yield Phone Keyboard Phone Light Phone Locations Phone Log Phone Messages Phone Screen Phone WiFI Connected Phone WiFI Visible Fitbit Fitbit Data Yield Fitbit Heart Rate Summary Fitbit Heart Rate Intraday Fitbit Sleep Summary Fitbit Sleep Intraday Fitbit Steps Summary Fitbit Steps Intraday Empatica Empatica Accelerometer Empatica Heart Rate Empatica Temperature Empatica Electrodermal Activity Empatica Blood Volume Pulse Empatica Inter Beat Interval Empatica Tags","title":"New Features for Existing Sensors"},{"location":"features/add-new-features/#modify-the-configyaml-file","text":"In this step, you need to add your provider configuration section under the relevant sensor in config.yaml . See our example for our tutorial\u2019s VEGA provider for PHONE_ACCELEROMETER : Example configuration for a new accelerometer provider VEGA PHONE_ACCELEROMETER : CONTAINER : accelerometer PROVIDERS : RAPIDS : # this is a feature provider COMPUTE : False ... PANDA : # this is another feature provider COMPUTE : False ... VEGA : # this is our new feature provider COMPUTE : False FEATURES : [ \"feature1\" , \"feature2\" , \"feature3\" ] MY_PARAMTER : a_string SRC_SCRIPT : src/features/phone_accelerometer/vega/main.py Key Description [COMPUTE] Flag to activate/deactivate your provider [FEATURES] List of features your provider supports. Your provider code should only return the features on this list [MY_PARAMTER] An arbitrary parameter that our example provider VEGA needs. This can be a boolean, integer, float, string, or an array of any of such types. [SRC_SCRIPT] The relative path from RAPIDS\u2019 root folder to a script that computes the features for this provider. 
It can be implemented in R or Python.","title":"Modify the config.yaml file"},{"location":"features/add-new-features/#create-a-feature-provider-script","text":"Create your feature Python or R script called main.py or main.R in the correct folder, src/feature/[sensorname]/[providername]/ . RAPIDS automatically loads and executes it based on the config key [SRC_SCRIPT] you added in the last step. For our example, this script is: src/feature/phone_accelerometer/vega/main.py","title":"Create a feature provider script"},{"location":"features/add-new-features/#implement-your-feature-extraction-code","text":"Every feature script ( main.[py|R] ) needs a [providername]_features function with specific parameters. RAPIDS calls this function with the sensor data ready to process and with other functions and arguments you will need. Python function def [ providername ] _features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): # empty for now return ( your_features_df ) R function [ providername ] _ features <- function ( sensor_data , time_segment , provider ){ # empty for now return ( your_features_df ) } Parameter Description sensor_data_files Path to the CSV file containing the data of a single participant. This data has been cleaned and preprocessed. Your function will be automatically called for each participant in your study (in the [PIDS] array in config.yaml ) time_segment The label of the time segment that should be processed. provider The parameters you configured for your provider in config.yaml will be available in this variable as a dictionary in Python or a list in R. In our example, this dictionary contains {MY_PARAMETER:\"a_string\"} filter_data_by_segment Python only. A function that you will use to filter your data. In R, this function is already available in the environment. *args Python only. Not used for now **kwargs Python only. 
Not used for now The next step is to implement the code that computes your behavioral features in your provider script\u2019s function. As with any other script, this function can call other auxiliary methods, but in general terms, it should have three stages: 1. Read a participant\u2019s data by loading the CSV data stored in the file pointed by sensor_data_files acc_data = pd . read_csv ( sensor_data_files [ \"sensor_data\" ]) Note that the phone\u2019s battery, screen, and activity recognition data are given as episodes instead of event rows (for example, start and end timestamps of the periods the phone screen was on) 2. Filter your data to process only those rows that belong to time_segment This step is only one line of code, but keep reading to understand why we need it. acc_data = filter_data_by_segment ( acc_data , time_segment ) You should use the filter_data_by_segment() function to process and group those rows that belong to each of the time segments RAPIDS could be configured with . Let\u2019s understand the filter_data_by_segment() function with an example. A RAPIDS user can extract features on any arbitrary time segment . A time segment is a period that has a label and one or more instances. For example, the user (or you) could have requested features on a daily, weekly, and weekend basis for p01 . The labels are arbitrary, and the instances depend on the days a participant was monitored for: the daily segment could be named my_days and if p01 was monitored for 14 days, it would have 14 instances the weekly segment could be named my_weeks and if p01 was monitored for 14 days, it would have 2 instances. the weekend segment could be named my_weekends and if p01 was monitored for 14 days, it would have 2 instances. For this example, RAPIDS will call your provider function three times for p01 , once where time_segment is my_days , once where time_segment is my_weeks , and once where time_segment is my_weekends . 
In this example, not every row in p01 \u2018s data needs to take part in the feature computation for either segment and the rows need to be grouped differently. Thus filter_data_by_segment() comes in handy, it will return a data frame that contains the rows that were logged during a time segment plus an extra column called local_segment . This new column will have as many unique values as time segment instances exist (14, 2, and 2 for our p01 \u2018s my_days , my_weeks , and my_weekends examples). After filtering, you should group the data frame by this column and compute any desired features , for example: acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () The reason RAPIDS does not filter the participant\u2019s data set for you is because your code might need to compute something based on a participant\u2019s complete dataset before computing their features. For example, you might want to identify the number that called a participant the most throughout the study before computing a feature with the number of calls the participant received from that number. 3. Return a data frame with your features After filtering, grouping your data, and computing your features, your provider function should return a data frame that has: One row per time segment instance (e.g., 14 our p01 \u2018s my_days example) The local_segment column added by filter_data_by_segment() One column per feature. The name of your features should only contain letters or numbers ( feature1 ) by convention. RAPIDS automatically adds the correct sensor and provider prefix; in our example, this prefix is phone_accelerometr_vega_ . PHONE_ACCELEROMETER Provider Example For your reference, this our own provider ( RAPIDS ) for PHONE_ACCELEROMETER that computes five acceleration features import pandas as pd import numpy as np def rapids_features ( sensor_data_files , time_segment , provider , filter_data_by_segment , * args , ** kwargs ): acc_data = pd . 
read_csv ( sensor_data_files [ \"sensor_data\" ]) requested_features = provider [ \"FEATURES\" ] # name of the features this function can compute base_features_names = [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] # the subset of requested features this function can compute features_to_compute = list ( set ( requested_features ) & set ( base_features_names )) acc_features = pd . DataFrame ( columns = [ \"local_segment\" ] + features_to_compute ) if not acc_data . empty : acc_data = filter_data_by_segment ( acc_data , time_segment ) if not acc_data . empty : acc_features = pd . DataFrame () # get magnitude related features: magnitude = sqrt(x^2+y^2+z^2) magnitude = acc_data . apply ( lambda row : np . sqrt ( row [ \"double_values_0\" ] ** 2 + row [ \"double_values_1\" ] ** 2 + row [ \"double_values_2\" ] ** 2 ), axis = 1 ) acc_data = acc_data . assign ( magnitude = magnitude . values ) if \"maxmagnitude\" in features_to_compute : acc_features [ \"maxmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . max () if \"minmagnitude\" in features_to_compute : acc_features [ \"minmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . min () if \"avgmagnitude\" in features_to_compute : acc_features [ \"avgmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . mean () if \"medianmagnitude\" in features_to_compute : acc_features [ \"medianmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . median () if \"stdmagnitude\" in features_to_compute : acc_features [ \"stdmagnitude\" ] = acc_data . groupby ([ \"local_segment\" ])[ \"magnitude\" ] . std () acc_features = acc_features . 
reset_index () return acc_features","title":"Implement your feature extraction code"},{"location":"features/add-new-features/#new-features-for-non-existing-sensors","text":"If you want to add features for a device or a sensor that we do not support at the moment (those that do not appear in the \"Existing Sensors\" list above), open a new discussion in Github and we can add the necessary code so you can follow the instructions above.","title":"New Features for Non-Existing Sensors"},{"location":"features/empatica-accelerometer/","text":"Empatica Accelerometer \u00b6 Sensor parameters description for [EMPATICA_ACCELEROMETER] : Key Description [CONTAINER] Name of the CSV file containing accelerometer data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute. DBDP provider \u00b6 Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_accelerometer_raw.csv - data/raw/ { pid } /empatica_accelerometer_with_datetime.csv - data/interim/ { pid } /empatica_accelerometer_features/empatica_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_accelerometer.csv Parameters description for [EMPATICA_ACCELEROMETER][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_ACCELEROMETER features from the DBDP provider [FEATURES] Features to be computed, see table below Features description for [EMPATICA_ACCELEROMETER][PROVIDERS][RAPDBDPIDS] : Feature Units Description maxmagnitude m/s 2 The maximum magnitude of acceleration ( \\(\\|acceleration\\| = \\sqrt{x^2 + y^2 + z^2}\\) ). minmagnitude m/s 2 The minimum magnitude of acceleration. avgmagnitude m/s 2 The average magnitude of acceleration. medianmagnitude m/s 2 The median magnitude of acceleration. stdmagnitude m/s 2 The standard deviation of acceleration. 
Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem, if this is something you need, get in touch and we can discuss how to implement it.","title":"Empatica Accelerometer"},{"location":"features/empatica-accelerometer/#empatica-accelerometer","text":"Sensor parameters description for [EMPATICA_ACCELEROMETER] : Key Description [CONTAINER] Name of the CSV file containing accelerometer data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.","title":"Empatica Accelerometer"},{"location":"features/empatica-accelerometer/#dbdp-provider","text":"Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_accelerometer_raw.csv - data/raw/ { pid } /empatica_accelerometer_with_datetime.csv - data/interim/ { pid } /empatica_accelerometer_features/empatica_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_accelerometer.csv Parameters description for [EMPATICA_ACCELEROMETER][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_ACCELEROMETER features from the DBDP provider [FEATURES] Features to be computed, see table below Features description for [EMPATICA_ACCELEROMETER][PROVIDERS][RAPDBDPIDS] : Feature Units Description maxmagnitude m/s 2 The maximum magnitude of acceleration ( \\(\\|acceleration\\| = \\sqrt{x^2 + y^2 + z^2}\\) ). minmagnitude m/s 2 The minimum magnitude of acceleration. avgmagnitude m/s 2 The average magnitude of acceleration. medianmagnitude m/s 2 The median magnitude of acceleration. stdmagnitude m/s 2 The standard deviation of acceleration. 
Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes is likely because the accelerometer dataset for a participant is too big to fit in memory. We are considering different alternatives to overcome this problem, if this is something you need, get in touch and we can discuss how to implement it.","title":"DBDP provider"},{"location":"features/empatica-blood-volume-pulse/","text":"Empatica Blood Volume Pulse \u00b6 Sensor parameters description for [EMPATICA_BLOOD_VOLUME_PULSE] : Key Description [CONTAINER] Name of the CSV file containing blood volume pulse data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute. DBDP provider \u00b6 Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_blood_volume_pulse_raw.csv - data/raw/ { pid } /empatica_blood_volume_pulse_with_datetime.csv - data/interim/ { pid } /empatica_blood_volume_pulse_features/empatica_blood_volume_pulse_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_blood_volume_pulse.csv Parameters description for [EMPATICA_BLOOD_VOLUME_PULSE][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_BLOOD_VOLUME_PULSE features from the DBDP provider [FEATURES] Features to be computed from blood volume pulse intraday data, see table below Features description for [EMPATICA_BLOOD_VOLUME_PULSE][PROVIDERS][DBDP] : Feature Units Description maxbvp - The maximum blood volume pulse during a time segment. minbvp - The minimum blood volume pulse during a time segment. avgbvp - The average blood volume pulse during a time segment. medianbvp - The median of blood volume pulse during a time segment. modebvp - The mode of blood volume pulse during a time segment. stdbvp - The standard deviation of blood volume pulse during a time segment. 
diffmaxmodebvp - The difference between the maximum and mode blood volume pulse during a time segment. diffminmodebvp - The difference between the mode and minimum blood volume pulse during a time segment. entropybvp nats Shannon\u2019s entropy measurement based on blood volume pulse during a time segment. Assumptions/Observations For more information about BVP read this .","title":"Empatica Blood Volume Pulse"},{"location":"features/empatica-blood-volume-pulse/#empatica-blood-volume-pulse","text":"Sensor parameters description for [EMPATICA_BLOOD_VOLUME_PULSE] : Key Description [CONTAINER] Name of the CSV file containing blood volume pulse data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.","title":"Empatica Blood Volume Pulse"},{"location":"features/empatica-blood-volume-pulse/#dbdp-provider","text":"Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_blood_volume_pulse_raw.csv - data/raw/ { pid } /empatica_blood_volume_pulse_with_datetime.csv - data/interim/ { pid } /empatica_blood_volume_pulse_features/empatica_blood_volume_pulse_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_blood_volume_pulse.csv Parameters description for [EMPATICA_BLOOD_VOLUME_PULSE][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_BLOOD_VOLUME_PULSE features from the DBDP provider [FEATURES] Features to be computed from blood volume pulse intraday data, see table below Features description for [EMPATICA_BLOOD_VOLUME_PULSE][PROVIDERS][DBDP] : Feature Units Description maxbvp - The maximum blood volume pulse during a time segment. minbvp - The minimum blood volume pulse during a time segment. avgbvp - The average blood volume pulse during a time segment. medianbvp - The median of blood volume pulse during a time segment. 
modebvp - The mode of blood volume pulse during a time segment. stdbvp - The standard deviation of blood volume pulse during a time segment. diffmaxmodebvp - The difference between the maximum and mode blood volume pulse during a time segment. diffminmodebvp - The difference between the mode and minimum blood volume pulse during a time segment. entropybvp nats Shannon\u2019s entropy measurement based on blood volume pulse during a time segment. Assumptions/Observations For more information about BVP read this .","title":"DBDP provider"},{"location":"features/empatica-electrodermal-activity/","text":"Empatica Electrodermal Activity \u00b6 Sensor parameters description for [EMPATICA_ELECTRODERMAL_ACTIVITY] : Key Description [CONTAINER] Name of the CSV file containing electrodermal activity data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute. DBDP provider \u00b6 Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_electrodermal_activity_raw.csv - data/raw/ { pid } /empatica_electrodermal_activity_with_datetime.csv - data/interim/ { pid } /empatica_electrodermal_activity_features/empatica_electrodermal activity_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_electrodermal_activity.csv Parameters description for [EMPATICA_ELECTRODERMAL_ACTIVITY][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_ELECTRODERMAL_ACTIVITY features from the DBDP provider [FEATURES] Features to be computed from electrodermal activity intraday data, see table below Features description for [EMPATICA_ELECTRODERMAL ACTIVITY][PROVIDERS][DBDP] : Feature Units Description maxeda microsiemens The maximum electrical conductance during a time segment. mineda microsiemens The minimum electrical conductance during a time segment. 
avgeda microsiemens The average electrical conductance during a time segment. medianeda microsiemens The median of electrical conductance during a time segment. modeeda microsiemens The mode of electrical conductance during a time segment. stdeda microsiemens The standard deviation of electrical conductance during a time segment. diffmaxmodeeda microsiemens The difference between the maximum and mode electrical conductance during a time segment. diffminmodeeda microsiemens The difference between the mode and minimum electrical conductance during a time segment. entropyeda nats Shannon\u2019s entropy measurement based on electrical conductance during a time segment. Assumptions/Observations None","title":"Empatica Electrodermal Activity"},{"location":"features/empatica-electrodermal-activity/#empatica-electrodermal-activity","text":"Sensor parameters description for [EMPATICA_ELECTRODERMAL_ACTIVITY] : Key Description [CONTAINER] Name of the CSV file containing electrodermal activity data that is compressed inside an Empatica zip file. 
Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.","title":"Empatica Electrodermal Activity"},{"location":"features/empatica-electrodermal-activity/#dbdp-provider","text":"Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_electrodermal_activity_raw.csv - data/raw/ { pid } /empatica_electrodermal_activity_with_datetime.csv - data/interim/ { pid } /empatica_electrodermal_activity_features/empatica_electrodermal activity_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_electrodermal_activity.csv Parameters description for [EMPATICA_ELECTRODERMAL_ACTIVITY][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_ELECTRODERMAL_ACTIVITY features from the DBDP provider [FEATURES] Features to be computed from electrodermal activity intraday data, see table below Features description for [EMPATICA_ELECTRODERMAL ACTIVITY][PROVIDERS][DBDP] : Feature Units Description maxeda microsiemens The maximum electrical conductance during a time segment. mineda microsiemens The minimum electrical conductance during a time segment. avgeda microsiemens The average electrical conductance during a time segment. medianeda microsiemens The median of electrical conductance during a time segment. modeeda microsiemens The mode of electrical conductance during a time segment. stdeda microsiemens The standard deviation of electrical conductance during a time segment. diffmaxmodeeda microsiemens The difference between the maximum and mode electrical conductance during a time segment. diffminmodeeda microsiemens The difference between the mode and minimum electrical conductance during a time segment. entropyeda nats Shannon\u2019s entropy measurement based on electrical conductance during a time segment. 
Assumptions/Observations None","title":"DBDP provider"},{"location":"features/empatica-heartrate/","text":"Empatica Heart Rate \u00b6 Sensor parameters description for [EMPATICA_HEARTRATE] : Key Description [CONTAINER] Name of the CSV file containing heart rate data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute. DBDP provider \u00b6 Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_heartrate_raw.csv - data/raw/ { pid } /empatica_heartrate_with_datetime.csv - data/interim/ { pid } /empatica_heartrate_features/empatica_heartrate_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_heartrate.csv Parameters description for [EMPATICA_HEARTRATE][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_HEARTRATE features from the DBDP provider [FEATURES] Features to be computed from heart rate intraday data, see table below Features description for [EMPATICA_HEARTRATE][PROVIDERS][DBDP] : Feature Units Description maxhr beats The maximum heart rate during a time segment. minhr beats The minimum heart rate during a time segment. avghr beats The average heart rate during a time segment. medianhr beats The median of heart rate during a time segment. modehr beats The mode of heart rate during a time segment. stdhr beats The standard deviation of heart rate during a time segment. diffmaxmodehr beats The difference between the maximum and mode heart rate during a time segment. diffminmodehr beats The difference between the mode and minimum heart rate during a time segment. entropyhr nats Shannon\u2019s entropy measurement based on heart rate during a time segment. 
Assumptions/Observations We extract the previous features based on the average heart rate values computed in 10-second windows .","title":"Empatica Heart Rate"},{"location":"features/empatica-heartrate/#empatica-heart-rate","text":"Sensor parameters description for [EMPATICA_HEARTRATE] : Key Description [CONTAINER] Name of the CSV file containing heart rate data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.","title":"Empatica Heart Rate"},{"location":"features/empatica-heartrate/#dbdp-provider","text":"Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_heartrate_raw.csv - data/raw/ { pid } /empatica_heartrate_with_datetime.csv - data/interim/ { pid } /empatica_heartrate_features/empatica_heartrate_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_heartrate.csv Parameters description for [EMPATICA_HEARTRATE][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_HEARTRATE features from the DBDP provider [FEATURES] Features to be computed from heart rate intraday data, see table below Features description for [EMPATICA_HEARTRATE][PROVIDERS][DBDP] : Feature Units Description maxhr beats The maximum heart rate during a time segment. minhr beats The minimum heart rate during a time segment. avghr beats The average heart rate during a time segment. medianhr beats The median of heart rate during a time segment. modehr beats The mode of heart rate during a time segment. stdhr beats The standard deviation of heart rate during a time segment. diffmaxmodehr beats The difference between the maximum and mode heart rate during a time segment. diffminmodehr beats The difference between the mode and minimum heart rate during a time segment. entropyhr nats Shannon\u2019s entropy measurement based on heart rate during a time segment. 
Assumptions/Observations We extract the previous features based on the average heart rate values computed in 10-second windows .","title":"DBDP provider"},{"location":"features/empatica-inter-beat-interval/","text":"Empatica Inter Beat Interval \u00b6 Sensor parameters description for [EMPATICA_INTER_BEAT_INTERVAL] : Key Description [CONTAINER] Name of the CSV file containing inter beat interval data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute. DBDP provider \u00b6 Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_inter_beat_interval_raw.csv - data/raw/ { pid } /empatica_inter_beat_interval_with_datetime.csv - data/interim/ { pid } /empatica_inter_beat_interval_features/empatica_inter_beat_interval_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_inter_beat_interval.csv Parameters description for [EMPATICA_INTER_BEAT_INTERVAL][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_INTER_BEAT_INTERVAL features from the DBDP provider [FEATURES] Features to be computed from inter beat interval intraday data, see table below Features description for [EMPATICA_INTER_BEAT_INTERVAL][PROVIDERS][DBDP] : Feature Units Description maxibi seconds The maximum inter beat interval during a time segment. minibi seconds The minimum inter beat interval during a time segment. avgibi seconds The average inter beat interval during a time segment. medianibi seconds The median of inter beat interval during a time segment. modeibi seconds The mode of inter beat interval during a time segment. stdibi seconds The standard deviation of inter beat interval during a time segment. diffmaxmodeibi seconds The difference between the maximum and mode inter beat interval during a time segment. 
diffminmodeibi seconds The difference between the mode and minimum inter beat interval during a time segment. entropyibi nats Shannon\u2019s entropy measurement based on inter beat interval during a time segment. Assumptions/Observations For more information about IBI read this .","title":"Empatica Inter Beat Interval"},{"location":"features/empatica-inter-beat-interval/#empatica-inter-beat-interval","text":"Sensor parameters description for [EMPATICA_INTER_BEAT_INTERVAL] : Key Description [CONTAINER] Name of the CSV file containing inter beat interval data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.","title":"Empatica Inter Beat Interval"},{"location":"features/empatica-inter-beat-interval/#dbdp-provider","text":"Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_inter_beat_interval_raw.csv - data/raw/ { pid } /empatica_inter_beat_interval_with_datetime.csv - data/interim/ { pid } /empatica_inter_beat_interval_features/empatica_inter_beat_interval_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_inter_beat_interval.csv Parameters description for [EMPATICA_INTER_BEAT_INTERVAL][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_INTER_BEAT_INTERVAL features from the DBDP provider [FEATURES] Features to be computed from inter beat interval intraday data, see table below Features description for [EMPATICA_INTER_BEAT_INTERVAL][PROVIDERS][DBDP] : Feature Units Description maxibi seconds The maximum inter beat interval during a time segment. minibi seconds The minimum inter beat interval during a time segment. avgibi seconds The average inter beat interval during a time segment. medianibi seconds The median of inter beat interval during a time segment. modeibi seconds The mode of inter beat interval during a time segment. 
stdibi seconds The standard deviation of inter beat interval during a time segment. diffmaxmodeibi seconds The difference between the maximum and mode inter beat interval during a time segment. diffminmodeibi seconds The difference between the mode and minimum inter beat interval during a time segment. entropyibi nats Shannon\u2019s entropy measurement based on inter beat interval during a time segment. Assumptions/Observations For more information about IBI read this .","title":"DBDP provider"},{"location":"features/empatica-tags/","text":"Empatica Tags \u00b6 Sensor parameters description for [EMPATICA_TAGS] : Key Description [CONTAINER] Name of the CSV file containing tags data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute. Note No feature providers have been implemented for this sensor yet, however you can implement your own features . To know more about tags read this .","title":"Empatica Tags"},{"location":"features/empatica-tags/#empatica-tags","text":"Sensor parameters description for [EMPATICA_TAGS] : Key Description [CONTAINER] Name of the CSV file containing tags data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute. Note No feature providers have been implemented for this sensor yet, however you can implement your own features . To know more about tags read this .","title":"Empatica Tags"},{"location":"features/empatica-temperature/","text":"Empatica Temperature \u00b6 Sensor parameters description for [EMPATICA_TEMPERATURE] : Key Description [CONTAINER] Name of the CSV file containing temperature data that is compressed inside an Empatica zip file. Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute. 
DBDP provider \u00b6 Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_temperature_raw.csv - data/raw/ { pid } /empatica_temperature_with_datetime.csv - data/interim/ { pid } /empatica_temperature_features/empatica_temperature_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_temperature.csv Parameters description for [EMPATICA_TEMPERATURE][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_TEMPERATURE features from the DBDP provider [FEATURES] Features to be computed from temperature intraday data, see table below Features description for [EMPATICA_TEMPERATURE][PROVIDERS][DBDP] : Feature Units Description maxtemp degrees C The maximum temperature during a time segment. mintemp degrees C The minimum temperature during a time segment. avgtemp degrees C The average temperature during a time segment. mediantemp degrees C The median of temperature during a time segment. modetemp degrees C The mode of temperature during a time segment. stdtemp degrees C The standard deviation of temperature during a time segment. diffmaxmodetemp degrees C The difference between the maximum and mode temperature during a time segment. diffminmodetemp degrees C The difference between the mode and minimum temperature during a time segment. entropytemp nats Shannon\u2019s entropy measurement based on temperature during a time segment. Assumptions/Observations None","title":"Empatica Temperature"},{"location":"features/empatica-temperature/#empatica-temperature","text":"Sensor parameters description for [EMPATICA_TEMPERATURE] : Key Description [CONTAINER] Name of the CSV file containing temperature data that is compressed inside an Empatica zip file. 
Since these zip files are created automatically by Empatica, there is no need to change the value of this attribute.","title":"Empatica Temperature"},{"location":"features/empatica-temperature/#dbdp-provider","text":"Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /empatica_temperature_raw.csv - data/raw/ { pid } /empatica_temperature_with_datetime.csv - data/interim/ { pid } /empatica_temperature_features/empatica_temperature_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /empatica_temperature.csv Parameters description for [EMPATICA_TEMPERATURE][PROVIDERS][DBDP] : Key Description [COMPUTE] Set to True to extract EMPATICA_TEMPERATURE features from the DBDP provider [FEATURES] Features to be computed from temperature intraday data, see table below Features description for [EMPATICA_TEMPERATURE][PROVIDERS][DBDP] : Feature Units Description maxtemp degrees C The maximum temperature during a time segment. mintemp degrees C The minimum temperature during a time segment. avgtemp degrees C The average temperature during a time segment. mediantemp degrees C The median of temperature during a time segment. modetemp degrees C The mode of temperature during a time segment. stdtemp degrees C The standard deviation of temperature during a time segment. diffmaxmodetemp degrees C The difference between the maximum and mode temperature during a time segment. diffminmodetemp degrees C The difference between the mode and minimum temperature during a time segment. entropytemp nats Shannon\u2019s entropy measurement based on temperature during a time segment. Assumptions/Observations None","title":"DBDP provider"},{"location":"features/feature-introduction/","text":"Behavioral Features Introduction \u00b6 A behavioral feature is a metric computed from raw sensor data quantifying the behavior of a participant. For example, the time spent at home computed based on location data. 
These are also known as digital biomarkers. RAPIDS\u2019 config.yaml has a section for each supported device/sensor (e.g., PHONE_ACCELEROMETER , FITBIT_STEPS , EMPATICA_HEARTRATE ). These sections follow a similar structure, and they can have one or more feature PROVIDERS , that compute one or more behavioral features. You will modify the parameters of these PROVIDERS to obtain features from different mobile sensors. We\u2019ll use PHONE_ACCELEROMETER as an example to explain this further. Hint We recommend reading this page if you are using RAPIDS for the first time All computed sensor features are stored under /data/processed/features on files per sensor, per participant and per study (all participants). Every time you change any sensor parameters, provider parameters or provider features, all the necessary files will be updated as soon as you execute RAPIDS. In short, to extract features offered by a provider, you need to set its [COMPUTE] flag to TRUE , configure any of its parameters, and execute RAPIDS. Explaining the config.yaml sensor sections with an example \u00b6 Each sensor section follows the same structure. Click on the numbered markers to know more. PHONE_ACCELEROMETER : # (1) CONTAINER : accelerometer # (2) PROVIDERS : # (3) RAPIDS : COMPUTE : False # (4) FEATURES : [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] SRC_SCRIPT : src/features/phone_accelerometer/rapids/main.py PANDA : COMPUTE : False VALID_SENSED_MINUTES : False FEATURES : # (5) exertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] nonexertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] # (6) SRC_SCRIPT : src/features/phone_accelerometer/panda/main.py Sensor section Each sensor (accelerometer, screen, etc.) of every supported device (smartphone, Fitbit, etc.) 
has a section in the config.yaml with parameters and feature PROVIDERS . Sensor Parameters. Each sensor section has one or more parameters. These are parameters that affect different aspects of how the raw data is pulled, and processed. The CONTAINER parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page. Sensor Providers Each object in this list represents a feature PROVIDER . Each sensor can have zero, one, or more providers. A PROVIDER is a script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, which are named after its first author like [PHONE_LOCATIONS][DORYAB] . In this example, there are two accelerometer feature providers RAPIDS and PANDA . PROVIDER Parameters Each PROVIDER has parameters that affect the computation of the behavioral features it offers. These parameters include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page. PROVIDER Features Each PROVIDER offers a set of behavioral features. These features are grouped in an array for some providers, like those for RAPIDS provider. For others, they are grouped in a collection of arrays, like those for PANDAS provider. In either case, you can delete the features you are not interested in, and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page. PROVIDER script Each PROVIDER has a SRC_SCRIPT that points to the script implementing its behavioral features. It has to be a relative path from RAPIDS\u2019 root folder and the script\u2019s parent folder should be named after the provider, e.g. 
panda . These are the descriptions of each marker for accessibility: Sensor section Each sensor (accelerometer, screen, etc.) of every supported device (smartphone, Fitbit, etc.) has a section in the config.yaml with parameters and feature PROVIDERS . Sensor Parameters. Each sensor section has one or more parameters. These are parameters that affect different aspects of how the raw data is pulled, and processed. The CONTAINER parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page. Sensor Providers Each object in this list represents a feature PROVIDER . Each sensor can have zero, one, or more providers. A PROVIDER is a script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, which are named after its first author like [PHONE_LOCATIONS][DORYAB] . In this example, there are two accelerometer feature providers RAPIDS and PANDA . PROVIDER Parameters Each PROVIDER has parameters that affect the computation of the behavioral features it offers. These parameters include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page. PROVIDER Features Each PROVIDER offers a set of behavioral features. These features are grouped in an array for some providers, like those for RAPIDS provider. For others, they are grouped in a collection of arrays, like those for PANDAS provider. In either case, you can delete the features you are not interested in, and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page. 
PROVIDER script Each PROVIDER has a SRC_SCRIPT that points to the script implementing its behavioral features. It has to be a relative path from RAPIDS\u2019 root folder and the script\u2019s parent folder should be named after the provider, e.g. panda .","title":"Introduction"},{"location":"features/feature-introduction/#behavioral-features-introduction","text":"A behavioral feature is a metric computed from raw sensor data quantifying the behavior of a participant. For example, the time spent at home computed based on location data. These are also known as digital biomarkers. RAPIDS\u2019 config.yaml has a section for each supported device/sensor (e.g., PHONE_ACCELEROMETER , FITBIT_STEPS , EMPATICA_HEARTRATE ). These sections follow a similar structure, and they can have one or more feature PROVIDERS , that compute one or more behavioral features. You will modify the parameters of these PROVIDERS to obtain features from different mobile sensors. We\u2019ll use PHONE_ACCELEROMETER as an example to explain this further. Hint We recommend reading this page if you are using RAPIDS for the first time All computed sensor features are stored under /data/processed/features on files per sensor, per participant and per study (all participants). Every time you change any sensor parameters, provider parameters or provider features, all the necessary files will be updated as soon as you execute RAPIDS. In short, to extract features offered by a provider, you need to set its [COMPUTE] flag to TRUE , configure any of its parameters, and execute RAPIDS.","title":"Behavioral Features Introduction"},{"location":"features/feature-introduction/#explaining-the-configyaml-sensor-sections-with-an-example","text":"Each sensor section follows the same structure. Click on the numbered markers to know more. 
PHONE_ACCELEROMETER : # (1) CONTAINER : accelerometer # (2) PROVIDERS : # (3) RAPIDS : COMPUTE : False # (4) FEATURES : [ \"maxmagnitude\" , \"minmagnitude\" , \"avgmagnitude\" , \"medianmagnitude\" , \"stdmagnitude\" ] SRC_SCRIPT : src/features/phone_accelerometer/rapids/main.py PANDA : COMPUTE : False VALID_SENSED_MINUTES : False FEATURES : # (5) exertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] nonexertional_activity_episode : [ \"sumduration\" , \"maxduration\" , \"minduration\" , \"avgduration\" , \"medianduration\" , \"stdduration\" ] # (6) SRC_SCRIPT : src/features/phone_accelerometer/panda/main.py Sensor section Each sensor (accelerometer, screen, etc.) of every supported device (smartphone, Fitbit, etc.) has a section in the config.yaml with parameters and feature PROVIDERS . Sensor Parameters. Each sensor section has one or more parameters. These are parameters that affect different aspects of how the raw data is pulled, and processed. The CONTAINER parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page. Sensor Providers Each object in this list represents a feature PROVIDER . Each sensor can have zero, one, or more providers. A PROVIDER is a script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, which are named after its first author like [PHONE_LOCATIONS][DORYAB] . In this example, there are two accelerometer feature providers RAPIDS and PANDA . PROVIDER Parameters Each PROVIDER has parameters that affect the computation of the behavioral features it offers. These parameters include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. 
We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page. PROVIDER Features Each PROVIDER offers a set of behavioral features. These features are grouped in an array for some providers, like those for RAPIDS provider. For others, they are grouped in a collection of arrays, like those for PANDAS provider. In either case, you can delete the features you are not interested in, and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page. PROVIDER script Each PROVIDER has a SRC_SCRIPT that points to the script implementing its behavioral features. It has to be a relative path from RAPIDS\u2019 root folder and the script\u2019s parent folder should be named after the provider, e.g. panda . These are the descriptions of each marker for accessibility: Sensor section Each sensor (accelerometer, screen, etc.) of every supported device (smartphone, Fitbit, etc.) has a section in the config.yaml with parameters and feature PROVIDERS . Sensor Parameters. Each sensor section has one or more parameters. These are parameters that affect different aspects of how the raw data is pulled, and processed. The CONTAINER parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page. Sensor Providers Each object in this list represents a feature PROVIDER . Each sensor can have zero, one, or more providers. A PROVIDER is a script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, which are named after its first author like [PHONE_LOCATIONS][DORYAB] . In this example, there are two accelerometer feature providers RAPIDS and PANDA . 
PROVIDER Parameters Each PROVIDER has parameters that affect the computation of the behavioral features it offers. These parameters include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page. PROVIDER Features Each PROVIDER offers a set of behavioral features. These features are grouped in an array for some providers, like those for RAPIDS provider. For others, they are grouped in a collection of arrays, like those for PANDAS provider. In either case, you can delete the features you are not interested in, and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page. PROVIDER script Each PROVIDER has a SRC_SCRIPT that points to the script implementing its behavioral features. It has to be a relative path from RAPIDS\u2019 root folder and the script\u2019s parent folder should be named after the provider, e.g. panda .","title":"Explaining the config.yaml sensor sections with an example"},{"location":"features/fitbit-calories-intraday/","text":"Fitbit Calories Intraday \u00b6 Sensor parameters description for [FITBIT_CALORIES_INTRADAY] : Key Description [CONTAINER] Container where your calories intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc. 
RAPIDS provider \u00b6 Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_calories_intraday_raw.csv - data/raw/ { pid } /fitbit_calories_intraday_with_datetime.csv - data/interim/ { pid } /fitbit_calories_intraday_features/fitbit_calories_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_calories_intraday.csv Parameters description for [FITBIT_CALORIES_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_CALORIES_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from calories intraday data, see table below [EPISODE_TYPE] RAPIDS will compute features for any episodes in this list. There are seven types of episodes defined as consecutive appearances of a label. Four are based on the activity level labels provided by Fitbit: sedentary , lightly active , fairly active , and very active . One is defined by RAPIDS as moderate to vigorous physical activity MVPA episodes that are based on all fairly active , and very active labels. Two are defined by the user based on a threshold that divides low or high MET (metabolic equivalent) episodes. EPISODE_TIME_THRESHOLD Any consecutive rows of the same [EPISODE_TYPE] will be considered a single episode if the time difference between them is less or equal than this threshold in minutes [EPISODE_MET_THRESHOLD] Any 1-minute calorie data chunk with a MET value equal or higher than this threshold will be considered a high MET episode and low MET otherwise. The default value is 3 [EPISODE_MVPA_CATEGORIES] The Fitbit level labels that are considered part of a moderate to vigorous physical activity episode. One or more of sedentary , lightly active , fairly active , and very active . The default are fairly active and very active [EPISODE_REFERENCE_TIME] Reference time for the start/end time features. 
MIDNIGHT sets the reference time to 00:00 of each day, START_OF_THE_SEGMENT sets the reference time to the start of the time segment (useful when a segment is shorter than a day or spans multiple days) Features description for [FITBIT_CALORIES_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description starttimefirstepisode EPISODE_TYPE minutes Start time of the first episode of type [EPISODE_TYPE] endtimefirstepisode EPISODE_TYPE minutes End time of the first episode of type [EPISODE_TYPE] starttimelastepisode EPISODE_TYPE minutes Start time of the last episode of type [EPISODE_TYPE] endtimelastepisode EPISODE_TYPE minutes End time of the last episode of type [EPISODE_TYPE] starttimelongestepisode EPISODE_TYPE minutes Start time of the longest episode of type [EPISODE_TYPE] endtimelongestepisode EPISODE_TYPE minutes End time of the longest episode of type [EPISODE_TYPE] countepisode EPISODE_TYPE episodes The number of episodes of type [EPISODE_TYPE] sumdurationepisode EPISODE_TYPE minutes The sum of the duration of episodes of type [EPISODE_TYPE] avgdurationepisode EPISODE_TYPE minutes The average of the duration of episodes of type [EPISODE_TYPE] maxdurationepisode EPISODE_TYPE minutes The maximum of the duration of episodes of type [EPISODE_TYPE] mindurationepisode EPISODE_TYPE minutes The minimum of the duration of episodes of type [EPISODE_TYPE] stddurationepisode EPISODE_TYPE minutes The standard deviation of the duration of episodes of type [EPISODE_TYPE] summet EPISODE_TYPE METs The sum of all METs during episodes of type [EPISODE_TYPE] avgmet EPISODE_TYPE METs The average of all METs during episodes of type [EPISODE_TYPE] maxmet EPISODE_TYPE METs The maximum of all METs during episodes of type [EPISODE_TYPE] minmet EPISODE_TYPE METs The minimum of all METs during episodes of type [EPISODE_TYPE] stdmet EPISODE_TYPE METs The standard deviation of all METs during episodes of type [EPISODE_TYPE] sumcalories EPISODE_TYPE calories The sum of all calories during 
episodes of type [EPISODE_TYPE] avgcalories EPISODE_TYPE calories The average of all calories during episodes of type [EPISODE_TYPE] maxcalories EPISODE_TYPE calories The maximum of all calories during episodes of type [EPISODE_TYPE] mincalories EPISODE_TYPE calories The minimum of all calories during episodes of type [EPISODE_TYPE] stdcalories EPISODE_TYPE calories The standard deviation of all calories during episodes of type [EPISODE_TYPE] Assumptions/Observations These features are based on intraday calories data that is usually obtained in 1-minute chunks from Fitbit\u2019s API. The MET value returned by Fitbit is divided by 10 Take into account that the intraday data returned by Fitbit can contain time series for calories burned inclusive of BMR, tracked activity, and manually logged activities.","title":"Fitbit Calories Intraday"},{"location":"features/fitbit-calories-intraday/#fitbit-calories-intraday","text":"Sensor parameters description for [FITBIT_CALORIES_INTRADAY] : Key Description [CONTAINER] Container where your calories intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.","title":"Fitbit Calories Intraday"},{"location":"features/fitbit-calories-intraday/#rapids-provider","text":"Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_calories_intraday_raw.csv - data/raw/ { pid } /fitbit_calories_intraday_with_datetime.csv - data/interim/ { pid } /fitbit_calories_intraday_features/fitbit_calories_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_calories_intraday.csv Parameters description for [FITBIT_CALORIES_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_CALORIES_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from calories intraday data, see table below [EPISODE_TYPE] RAPIDS will compute features for any episodes in this list. 
There are seven types of episodes defined as consecutive appearances of a label. Four are based on the activity level labels provided by Fitbit: sedentary , lightly active , fairly active , and very active . One is defined by RAPIDS as moderate to vigorous physical activity MVPA episodes that are based on all fairly active , and very active labels. Two are defined by the user based on a threshold that divides low or high MET (metabolic equivalent) episodes. EPISODE_TIME_THRESHOLD Any consecutive rows of the same [EPISODE_TYPE] will be considered a single episode if the time difference between them is less or equal than this threshold in minutes [EPISODE_MET_THRESHOLD] Any 1-minute calorie data chunk with a MET value equal or higher than this threshold will be considered a high MET episode and low MET otherwise. The default value is 3 [EPISODE_MVPA_CATEGORIES] The Fitbit level labels that are considered part of a moderate to vigorous physical activity episode. One or more of sedentary , lightly active , fairly active , and very active . The default are fairly active and very active [EPISODE_REFERENCE_TIME] Reference time for the start/end time features. 
MIDNIGHT sets the reference time to 00:00 of each day, START_OF_THE_SEGMENT sets the reference time to the start of the time segment (useful when a segment is shorter than a day or spans multiple days) Features description for [FITBIT_CALORIES_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description starttimefirstepisode EPISODE_TYPE minutes Start time of the first episode of type [EPISODE_TYPE] endtimefirstepisode EPISODE_TYPE minutes End time of the first episode of type [EPISODE_TYPE] starttimelastepisode EPISODE_TYPE minutes Start time of the last episode of type [EPISODE_TYPE] endtimelastepisode EPISODE_TYPE minutes End time of the last episode of type [EPISODE_TYPE] starttimelongestepisode EPISODE_TYPE minutes Start time of the longest episode of type [EPISODE_TYPE] endtimelongestepisode EPISODE_TYPE minutes End time of the longest episode of type [EPISODE_TYPE] countepisode EPISODE_TYPE episodes The number of episodes of type [EPISODE_TYPE] sumdurationepisode EPISODE_TYPE minutes The sum of the duration of episodes of type [EPISODE_TYPE] avgdurationepisode EPISODE_TYPE minutes The average of the duration of episodes of type [EPISODE_TYPE] maxdurationepisode EPISODE_TYPE minutes The maximum of the duration of episodes of type [EPISODE_TYPE] mindurationepisode EPISODE_TYPE minutes The minimum of the duration of episodes of type [EPISODE_TYPE] stddurationepisode EPISODE_TYPE minutes The standard deviation of the duration of episodes of type [EPISODE_TYPE] summet EPISODE_TYPE METs The sum of all METs during episodes of type [EPISODE_TYPE] avgmet EPISODE_TYPE METs The average of all METs during episodes of type [EPISODE_TYPE] maxmet EPISODE_TYPE METs The maximum of all METs during episodes of type [EPISODE_TYPE] minmet EPISODE_TYPE METs The minimum of all METs during episodes of type [EPISODE_TYPE] stdmet EPISODE_TYPE METs The standard deviation of all METs during episodes of type [EPISODE_TYPE] sumcalories EPISODE_TYPE calories The sum of all calories during 
episodes of type [EPISODE_TYPE] avgcalories EPISODE_TYPE calories The average of all calories during episodes of type [EPISODE_TYPE] maxcalories EPISODE_TYPE calories The maximum of all calories during episodes of type [EPISODE_TYPE] mincalories EPISODE_TYPE calories The minimum of all calories during episodes of type [EPISODE_TYPE] stdcalories EPISODE_TYPE calories The standard deviation of all calories during episodes of type [EPISODE_TYPE] Assumptions/Observations These features are based on intraday calories data that is usually obtained in 1-minute chunks from Fitbit\u2019s API. The MET value returned by Fitbit is divided by 10 Take into account that the intraday data returned by Fitbit can contain time series for calories burned inclusive of BMR, tracked activity, and manually logged activities.","title":"RAPIDS provider"},{"location":"features/fitbit-data-yield/","text":"Fitbit Data Yield \u00b6 We use Fitbit heart rate intraday data to extract data yield features. Fitbit data yield features can be used to remove rows ( time segments ) that do not contain enough Fitbit data. You should decide what is your \u201cenough\u201d threshold depending on the time a participant was supposed to be wearing their Fitbit, the length of your study, and the rates of missing data that your analysis could handle. Why is Fitbit data yield important? Imagine that you want to extract FITBIT_STEPS_SUMMARY features on daily segments ( 00:00 to 23:59 ). Let\u2019s say that on day 1 the Fitbit logged 6k as the total step count and the heart rate sensor logged 24 hours of data and on day 2 the Fitbit logged 101 as the total step count and the heart rate sensor logged 2 hours of data. It\u2019s very likely that on day 2 you walked during the other 22 hours so including this day in your analysis could bias your results. Sensor parameters description for [FITBIT_DATA_YIELD] : Key Description [SENSORS] The Fitbit sensor we considered for calculating the Fitbit data yield features. 
We only support FITBIT_HEARTRATE_INTRADAY since sleep data is commonly collected only overnight, and step counts are 0 even when not wearing the Fitbit device. RAPIDS provider \u00b6 Before explaining the data yield features, let\u2019s define the following relevant concepts: A valid minute is any 60 second window when Fitbit heart rate intraday sensor logged at least 1 row of data A valid hour is any 60 minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /fitbit_heartrate_intraday_raw.csv - data/raw/ { pid } /fitbit_heartrate_intraday_with_datetime.csv - data/interim/ { pid } /fitbit_data_yield_features/fitbit_data_yield_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_data_yield.csv Parameters description for [FITBIT_DATA_YIELD][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_DATA_YIELD features from the RAPIDS provider [FEATURES] Features to be computed, see table below [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The proportion [0.0 ,1.0] of valid minutes in a 60-minute window necessary to flag that window as valid. Features description for [FITBIT_DATA_YIELD][PROVIDERS][RAPIDS] : Feature Units Description ratiovalidyieldedminutes - The ratio between the number of valid minutes and the duration in minutes of a time segment. ratiovalidyieldedhours - The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour this feature will always be 1. Assumptions/Observations We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together. 
For example, let\u2019s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur: the 12 missing hours are from the beginning of the segment or 30 minutes could be missing from every hour (24 * 30 minutes = 12 hours). ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b . In other words, sensed data for b is more evenly spread compared to a . We assume your Fitbit intraday data was sampled (requested form the Fitbit API) at 1 minute intervals, if the interval is longer, for example 15 minutes, you need to take into account that valid minutes and valid hours ratios are going to be small (for example you would have at most 4 \u201cminutes\u201d of data per hour because you would have four 15-minute windows) and so you should adjust your thresholds to include and exclude rows accordingly. If you are in this situation, get in touch with us, we could implement this use case but we are not sure there is enough demand for it at the moment since you can control the sampling rate of the data you request from Fitbit API.","title":"Fitbit Data Yield"},{"location":"features/fitbit-data-yield/#fitbit-data-yield","text":"We use Fitbit heart rate intraday data to extract data yield features. Fitbit data yield features can be used to remove rows ( time segments ) that do not contain enough Fitbit data. You should decide what is your \u201cenough\u201d threshold depending on the time a participant was supposed to be wearing their Fitbit, the length of your study, and the rates of missing data that your analysis could handle. Why is Fitbit data yield important? Imagine that you want to extract FITBIT_STEPS_SUMMARY features on daily segments ( 00:00 to 23:59 ). 
Let\u2019s say that on day 1 the Fitbit logged 6k as the total step count and the heart rate sensor logged 24 hours of data and on day 2 the Fitbit logged 101 as the total step count and the heart rate sensor logged 2 hours of data. It\u2019s very likely that on day 2 you walked during the other 22 hours so including this day in your analysis could bias your results. Sensor parameters description for [FITBIT_DATA_YIELD] : Key Description [SENSORS] The Fitbit sensor we considered for calculating the Fitbit data yield features. We only support FITBIT_HEARTRATE_INTRADAY since sleep data is commonly collected only overnight, and step counts are 0 even when not wearing the Fitbit device.","title":"Fitbit Data Yield"},{"location":"features/fitbit-data-yield/#rapids-provider","text":"Before explaining the data yield features, let\u2019s define the following relevant concepts: A valid minute is any 60 second window when Fitbit heart rate intraday sensor logged at least 1 row of data A valid hour is any 60 minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] Available time segments and platforms Available for all time segments File Sequence - data/raw/ { pid } /fitbit_heartrate_intraday_raw.csv - data/raw/ { pid } /fitbit_heartrate_intraday_with_datetime.csv - data/interim/ { pid } /fitbit_data_yield_features/fitbit_data_yield_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_data_yield.csv Parameters description for [FITBIT_DATA_YIELD][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_DATA_YIELD features from the RAPIDS provider [FEATURES] Features to be computed, see table below [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The proportion [0.0 ,1.0] of valid minutes in a 60-minute window necessary to flag that window as valid. 
Features description for [FITBIT_DATA_YIELD][PROVIDERS][RAPIDS] : Feature Units Description ratiovalidyieldedminutes - The ratio between the number of valid minutes and the duration in minutes of a time segment. ratiovalidyieldedhours - The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour this feature will always be 1. Assumptions/Observations We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together. For example, let\u2019s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur: the 12 missing hours are from the beginning of the segment or 30 minutes could be missing from every hour (24 * 30 minutes = 12 hours). ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b . In other words, sensed data for b is more evenly spread compared to a . We assume your Fitbit intraday data was sampled (requested form the Fitbit API) at 1 minute intervals, if the interval is longer, for example 15 minutes, you need to take into account that valid minutes and valid hours ratios are going to be small (for example you would have at most 4 \u201cminutes\u201d of data per hour because you would have four 15-minute windows) and so you should adjust your thresholds to include and exclude rows accordingly. 
If you are in this situation, get in touch with us, we could implement this use case but we are not sure there is enough demand for it at the moment since you can control the sampling rate of the data you request from Fitbit API.","title":"RAPIDS provider"},{"location":"features/fitbit-heartrate-intraday/","text":"Fitbit Heart Rate Intraday \u00b6 Sensor parameters description for [FITBIT_HEARTRATE_INTRADAY] : Key Description [CONTAINER] Container where your heart rate intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc. RAPIDS provider \u00b6 Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_heartrate_intraday_raw.csv - data/raw/ { pid } /fitbit_heartrate_intraday_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_intraday_features/fitbit_heartrate_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_intraday.csv Parameters description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate intraday data, see table below Features description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description maxhr beats/mins The maximum heart rate during a time segment. minhr beats/mins The minimum heart rate during a time segment. avghr beats/mins The average heart rate during a time segment. medianhr beats/mins The median of heart rate during a time segment. modehr beats/mins The mode of heart rate during a time segment. stdhr beats/mins The standard deviation of heart rate during a time segment. diffmaxmodehr beats/mins The difference between the maximum and mode heart rate during a time segment. diffminmodehr beats/mins The difference between the mode and minimum heart rate during a time segment. 
entropyhr nats Shannon\u2019s entropy measurement based on heart rate during a time segment. minutesonZONE minutes Number of minutes the user\u2019s heart rate fell within each heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed.","title":"Fitbit Heart Rate Intraday"},{"location":"features/fitbit-heartrate-intraday/#fitbit-heart-rate-intraday","text":"Sensor parameters description for [FITBIT_HEARTRATE_INTRADAY] : Key Description [CONTAINER] Container where your heart rate intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.","title":"Fitbit Heart Rate Intraday"},{"location":"features/fitbit-heartrate-intraday/#rapids-provider","text":"Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_heartrate_intraday_raw.csv - data/raw/ { pid } /fitbit_heartrate_intraday_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_intraday_features/fitbit_heartrate_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_intraday.csv Parameters description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate intraday data, see table below Features description for [FITBIT_HEARTRATE_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description maxhr beats/mins The maximum heart rate during a time segment. minhr beats/mins The minimum heart rate during a time segment. avghr beats/mins The average heart rate during a time segment. medianhr beats/mins The median of heart rate during a time segment. modehr beats/mins The mode of heart rate during a time segment. 
stdhr beats/mins The standard deviation of heart rate during a time segment. diffmaxmodehr beats/mins The difference between the maximum and mode heart rate during a time segment. diffminmodehr beats/mins The difference between the mode and minimum heart rate during a time segment. entropyhr nats Shannon\u2019s entropy measurement based on heart rate during a time segment. minutesonZONE minutes Number of minutes the user\u2019s heart rate fell within each heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed.","title":"RAPIDS provider"},{"location":"features/fitbit-heartrate-summary/","text":"Fitbit Heart Rate Summary \u00b6 Sensor parameters description for [FITBIT_HEARTRATE_SUMMARY] : Key Description [CONTAINER] Container where your heart rate summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc. RAPIDS provider \u00b6 Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_heartrate_summary_raw.csv - data/raw/ { pid } /fitbit_heartrate_summary_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_summary_features/fitbit_heartrate_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_summary.csv Parameters description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate summary data, see table below Features description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxrestinghr beats/mins The maximum daily resting heart rate during a time segment. 
minrestinghr beats/mins The minimum daily resting heart rate during a time segment. avgrestinghr beats/mins The average daily resting heart rate during a time segment. medianrestinghr beats/mins The median of daily resting heart rate during a time segment. moderestinghr beats/mins The mode of daily resting heart rate during a time segment. stdrestinghr beats/mins The standard deviation of daily resting heart rate during a time segment. diffmaxmoderestinghr beats/mins The difference between the maximum and mode daily resting heart rate during a time segment. diffminmoderestinghr beats/mins The difference between the mode and minimum daily resting heart rate during a time segment. entropyrestinghr nats Shannon\u2019s entropy measurement based on daily resting heart rate during a time segment. sumcaloriesZONE cals The total daily calories burned within heartrate_zone during a time segment. maxcaloriesZONE cals The maximum daily calories burned within heartrate_zone during a time segment. mincaloriesZONE cals The minimum daily calories burned within heartrate_zone during a time segment. avgcaloriesZONE cals The average daily calories burned within heartrate_zone during a time segment. mediancaloriesZONE cals The median of daily calories burned within heartrate_zone during a time segment. stdcaloriesZONE cals The standard deviation of daily calories burned within heartrate_zone during a time segment. entropycaloriesZONE nats Shannon\u2019s entropy measurement based on daily calories burned within heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed. 
Calories\u2019 accuracy depends on the users\u2019 Fitbit profile (weight, height, etc.).","title":"Fitbit Heart Rate Summary"},{"location":"features/fitbit-heartrate-summary/#fitbit-heart-rate-summary","text":"Sensor parameters description for [FITBIT_HEARTRATE_SUMMARY] : Key Description [CONTAINER] Container where your heart rate summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.","title":"Fitbit Heart Rate Summary"},{"location":"features/fitbit-heartrate-summary/#rapids-provider","text":"Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_heartrate_summary_raw.csv - data/raw/ { pid } /fitbit_heartrate_summary_with_datetime.csv - data/interim/ { pid } /fitbit_heartrate_summary_features/fitbit_heartrate_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_heartrate_summary.csv Parameters description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_HEARTRATE_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from heart rate summary data, see table below Features description for [FITBIT_HEARTRATE_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxrestinghr beats/mins The maximum daily resting heart rate during a time segment. minrestinghr beats/mins The minimum daily resting heart rate during a time segment. avgrestinghr beats/mins The average daily resting heart rate during a time segment. medianrestinghr beats/mins The median of daily resting heart rate during a time segment. moderestinghr beats/mins The mode of daily resting heart rate during a time segment. stdrestinghr beats/mins The standard deviation of daily resting heart rate during a time segment. 
diffmaxmoderestinghr beats/mins The difference between the maximum and mode daily resting heart rate during a time segment. diffminmoderestinghr beats/mins The difference between the mode and minimum daily resting heart rate during a time segment. entropyrestinghr nats Shannon\u2019s entropy measurement based on daily resting heart rate during a time segment. sumcaloriesZONE cals The total daily calories burned within heartrate_zone during a time segment. maxcaloriesZONE cals The maximum daily calories burned within heartrate_zone during a time segment. mincaloriesZONE cals The minimum daily calories burned within heartrate_zone during a time segment. avgcaloriesZONE cals The average daily calories burned within heartrate_zone during a time segment. mediancaloriesZONE cals The median of daily calories burned within heartrate_zone during a time segment. stdcaloriesZONE cals The standard deviation of daily calories burned within heartrate_zone during a time segment. entropycaloriesZONE nats Shannon\u2019s entropy measurement based on daily calories burned within heartrate_zone during a time segment. Assumptions/Observations There are four heart rate zones (ZONE): outofrange , fatburn , cardio , and peak . Please refer to Fitbit documentation for more information about the way they are computed. Calories\u2019 accuracy depends on the users\u2019 Fitbit profile (weight, height, etc.).","title":"RAPIDS provider"},{"location":"features/fitbit-sleep-intraday/","text":"Fitbit Sleep Intraday \u00b6 Sensor parameters description for [FITBIT_SLEEP_INTRADAY] : Key Description [CONTAINER] Container where your sleep intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc. RAPIDS provider \u00b6 Understanding RAPIDS features This diagram will help you understand how sleep episodes are chunked and grouped within time segments for the RAPIDS provider. 
Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_sleep_intraday_raw.csv - data/raw/ { pid } /fitbit_sleep_intraday_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes_resampled.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes_resampled_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_intraday_features/fitbit_sleep_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_intraday.csv Parameters description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from sleep intraday data, see table below [SLEEP_LEVELS] Fitbit\u2019s sleep API Version 1 only provides CLASSIC records. However, Version 1.2 provides 2 types of records: CLASSIC and STAGES . STAGES is only available in devices with a heart rate sensor and even those devices will fail to report it if the battery is low or the device is not tight enough. While CLASSIC contains 3 sleep levels ( awake , restless , and asleep ), STAGES contains 4 sleep levels ( wake , deep , light , rem ). To make it consistent, RAPIDS groups them into 2 UNIFIED sleep levels: awake ( CLASSIC : awake and restless ; STAGES : wake ) and asleep ( CLASSIC : asleep ; STAGES : deep , light , and rem ). In this section, there is a boolean flag named INCLUDE_ALL_GROUPS that if set to TRUE, computes LEVELS_AND_TYPES features grouping all levels together in a single all category. [SLEEP_TYPES] Types of sleep to be included in the feature extraction computation. There are three sleep types: main , nap , and all . The all type means both main sleep and naps are considered. 
Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS][LEVELS_AND_TYPES] : Feature Units Description countepisode [LEVEL][TYPE] episodes Number of [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. sumduration [LEVEL][TYPE] minutes Total duration of all [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. maxduration [LEVEL][TYPE] minutes Longest duration of any [LEVEL][TYPE] sleep episode. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. minduration [LEVEL][TYPE] minutes Shortest duration of any [LEVEL][TYPE] sleep episode. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. avgduration [LEVEL][TYPE] minutes Average duration of all [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. medianduration [LEVEL][TYPE] minutes Median duration of all [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. 
stdduration [LEVEL][TYPE] minutes Standard deviation duration of all [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [ACROSS_LEVELS] : Feature Units Description ratiocount [LEVEL] - Ratio between the count of episodes of a single sleep [LEVEL] and the count of all episodes of all levels during both main and nap sleep types. This answers the question: what percentage of all wake , deep , light , and rem episodes were rem ? (e.g., \\(countepisode[remstages][all] / countepisode[all][all]\\) ) ratioduration [LEVEL] - Ratio between the duration of episodes of a single sleep [LEVEL] and the duration of all episodes of all levels during both main and nap sleep types. This answers the question: what percentage of all wake , deep , light , and rem time was rem ? (e.g., \\(sumduration[remstages][all] / sumduration[all][all]\\) ) Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [ACROSS_TYPES] : Feature Units Description ratiocountmain - Ratio between the count of all main episodes (independently of the levels inside) divided by the count of all main and nap episodes. This answers the question: what percentage of all sleep episodes ( main and nap ) were main ? We do not provide the ratio for nap because is complementary. ( \\(countepisode[all][main] / countepisode[all][all]\\) ) ratiodurationmain - Ratio between the duration of all main episodes (independently of the levels inside) divided by the duration of all main and nap episodes. This answers the question: what percentage of all sleep time ( main and nap ) was main ? We do not provide the ratio for nap because is complementary. 
( \\(sumduration[all][main] / sumduration[all][all]\\) ) Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [WITHIN_LEVELS] : Feature Units Description ratiocountmainwithin [LEVEL] - Ratio between the count of episodes of a single sleep [LEVEL] during main sleep divided by the count of episodes of a single sleep [LEVEL] during main and nap . This answers the question: are rem episodes more frequent during main than nap sleep? We do not provide the ratio for nap because is complementary. ( \\(countepisode[remstages][main] / countepisode[remstages][all]\\) ) ratiodurationmainwithin [LEVEL] - Ratio between the duration of episodes of a single sleep [LEVEL] during main sleep divided by the duration of episodes of a single sleep [LEVEL] during main and nap . This answers the question: is rem time more frequent during main than nap sleep? We do not provide the ratio for nap because is complementary. ( \\(countepisode[remstages][main] / countepisode[remstages][all]\\) ) Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [WITHIN_TYPES] : Feature Units Description ratiocount [LEVEL] within [TYPE] - Ratio between the count of episodes of a single sleep [LEVEL] and the count of all episodes of all levels during either main or nap sleep types. This answers the question: what percentage of all wake , deep , light , and rem episodes were rem during main / nap sleep time? (e.g., \\(countepisode[remstages][main] / countepisode[all][main]\\) ) ratioduration [LEVEL] within [TYPE] - Ratio between the duration of episodes of a single sleep [LEVEL] and the duration of all episodes of all levels during either main or nap sleep types. This answers the question: what percentage of all wake , deep , light , and rem time was rem during main / nap sleep time? 
(e.g., \\(sumduration[remstages][main] / sumduration[all][main]\\) ) Assumptions/Observations This diagram will help you understand how sleep episodes are chunked and grouped within time segments for the RAPIDS provider. Features listed in [LEVELS_AND_TYPES] are computed for any levels and types listed in [SLEEP_LEVELS] or [SLEEP_TYPES] . For example if STAGES only contains [rem, light] you will not get countepisode[wake|deep][TYPE] or sum, max, min, avg, median, or std duration . Levels or types in these lists do not influence RATIOS or ROUTINE features. Any [LEVEL] grouping is done within the elements of each class CLASSIC , STAGES , and UNIFIED . That is, we never combine CLASSIC or STAGES types to compute features. The categories for all levels (when INCLUDE_ALL_GROUPS is True ) and all SLEEP_TYPES are not considered for RATIOS features as they are always 1. These features can be computed in time segments of any length, but only the 1-minute sleep chunks within each segment instance will be used. PRICE provider \u00b6 Understanding PRICE features This diagram will help you understand how sleep episodes are chunked and grouped within time segments and LNE-LNE intervals for the PRICE provider. 
Available time segments Available for any time segments larger or equal to one day File Sequence - data/raw/ { pid } /fitbit_sleep_intraday_raw.csv - data/raw/ { pid } /fitbit_sleep_intraday_parsed.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes_resampled.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes_resampled_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_intraday_features/fitbit_sleep_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_intraday.csv Parameters description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][PRICE] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_INTRADAY features from the PRICE provider [FEATURES] Features to be computed from sleep intraday data, see table below [SLEEP_LEVELS] Fitbit\u2019s sleep API Version 1 only provides CLASSIC records. However, Version 1.2 provides 2 types of records: CLASSIC and STAGES . STAGES is only available in devices with a heart rate sensor and even those devices will fail to report it if the battery is low or the device is not tight enough. While CLASSIC contains 3 sleep levels ( awake , restless , and asleep ), STAGES contains 4 sleep levels ( wake , deep , light , rem ). To make it consistent, RAPIDS groups them into 2 UNIFIED sleep levels: awake ( CLASSIC : awake and restless ; STAGES : wake ) and asleep ( CLASSIC : asleep ; STAGES : deep , light , and rem ). In this section, there is a boolean flag named INCLUDE_ALL_GROUPS that if set to TRUE, computes avgdurationallmain [DAY_TYPE] features grouping all levels together in a single all category. [DAY_TYPE] The features of this provider can be computed using daily averages/standard deviations that were extracted on WEEKEND days only, WEEK days only, or ALL days [LAST_NIGHT_END] Only main sleep episodes that start within the LNE-LNE interval [ LAST_NIGHT_END , LAST_NIGHT_END + 23H 59M 59S] are taken into account to compute the features described below. 
[LAST_NIGHT_END] is a number ranging from 0 (midnight) to 1439 (23:59). Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][PRICE] : Feature Units Description avgduration [LEVEL] main [DAY_TYPE] minutes Average duration of daily sleep chunks of a LEVEL . Use the DAY_TYPE flag to include daily durations from weekend days only, weekdays, or both. Use [LEVEL] to group all levels in a single all category. avgratioduration [LEVEL] withinmain [DAY_TYPE] - Average of the daily ratio between the duration of sleep chunks of a LEVEL and total duration of all main sleep episodes in a day. When INCLUDE_ALL_GROUPS is True the all LEVEL is ignored since this feature is always 1. Use the DAY_TYPE flag to include start times from weekend days only, weekdays, or both. avgstarttimeofepisodemain [DAY_TYPE] minutes Average of all start times of the first main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include start times from LNE-LNE intervals that start on weekend days only, weekdays, or both. avgendtimeofepisodemain [DAY_TYPE] minutes Average of all end times of the last main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both. avgmidpointofepisodemain [DAY_TYPE] minutes Average of all the differences between avgendtime... and avgstarttime.. in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both. stdstarttimeofepisodemain [DAY_TYPE] minutes Standard deviation of all start times of the first main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include start times from LNE-LNE intervals that start on weekend days only, weekdays, or both. 
stdendtimeofepisodemain [DAY_TYPE] minutes Standard deviation of all end times of the last main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both. stdmidpointofepisodemain [DAY_TYPE] minutes Standard deviation of all the differences between avgendtime... and avgstarttime.. in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both. socialjetlag minutes Difference in minutes between the avgmidpointofepisodemain of weekends and weekdays that belong to each time segment instance. If your time segment does not contain at least one week day and one weekend day this feature will be NA. rmssdmeanstarttimeofepisodemain minutes Square root of the mean squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s starttimeofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the mean of how someone\u2019s starttimeofepisodemain (bedtime) changed from night to night. rmssdmeanendtimeofepisodemain minutes Square root of the mean squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s endtimeofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the mean of how someone\u2019s endtimeofepisodemain (wake time) changed from night to night. rmssdmeanmidpointofepisodemain minutes Square root of the mean squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s midpointofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the mean of how someone\u2019s midpointofepisodemain (mid time between bedtime and wake time) changed from night to night. 
rmssdmedianstarttimeofepisodemain minutes Square root of the median squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s starttimeofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the median of how someone\u2019s starttimeofepisodemain (bedtime) changed from night to night. rmssdmedianendtimeofepisodemain minutes Square root of the median squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s endtimeofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the median of how someone\u2019s endtimeofepisodemain (wake time) changed from night to night. rmssdmedianmidpointofepisodemain minutes Square root of the median squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s midpointofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the median of how someone\u2019s midpointofepisodemain (average mid time between bedtime and wake time) changed from night to night. Assumptions/Observations This diagram will help you understand how sleep episodes are chunked and grouped within time segments and LNE-LNE intervals for the PRICE provider. We recommend you use periodic segments that start in the morning so RAPIDS can chunk and group sleep episodes overnight. Shifted segments (as any other segments) are labelled based on their start and end date times. avgstarttime... and avgendtime... are roughly equivalent to an average bed and awake time only if you are using shifted segments. The features of this provider are only available on time segments that are longer than 24 hours because they are based on descriptive statistics computed across daily values. Even though Fitbit provides 2 types of sleep episodes ( main and nap ), only main sleep episodes are considered. 
The reference point for all times is 00:00 of the first day in the LNE-LNE interval. Sleep episodes are formed by 1-minute chunks that we group overnight starting from today\u2019s LNE and ending on tomorrow\u2019s LNE or the end of that segment (whatever is first). The features avgstarttime... and avgendtime... are the average of the first and last sleep episode across every LNE-LNE interval within a segment ( avgmidtime... is the mid point between start and end). Therefore, only segments longer than 24hrs will be averaged across more than one LNE-LNE interval. socialjetlag is only available on segment instances equal or longer than 48hrs that contain at least one weekday day and one weekend day, for example seven-day (weekly) segments.","title":"Fitbit Sleep Intraday"},{"location":"features/fitbit-sleep-intraday/#fitbit-sleep-intraday","text":"Sensor parameters description for [FITBIT_SLEEP_INTRADAY] : Key Description [CONTAINER] Container where your sleep intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.","title":"Fitbit Sleep Intraday"},{"location":"features/fitbit-sleep-intraday/#rapids-provider","text":"Understanding RAPIDS features This diagram will help you understand how sleep episodes are chunked and grouped within time segments for the RAPIDS provider. 
Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_sleep_intraday_raw.csv - data/raw/ { pid } /fitbit_sleep_intraday_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes_resampled.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes_resampled_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_intraday_features/fitbit_sleep_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_intraday.csv Parameters description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from sleep intraday data, see table below [SLEEP_LEVELS] Fitbit\u2019s sleep API Version 1 only provides CLASSIC records. However, Version 1.2 provides 2 types of records: CLASSIC and STAGES . STAGES is only available in devices with a heart rate sensor and even those devices will fail to report it if the battery is low or the device is not tight enough. While CLASSIC contains 3 sleep levels ( awake , restless , and asleep ), STAGES contains 4 sleep levels ( wake , deep , light , rem ). To make it consistent, RAPIDS groups them into 2 UNIFIED sleep levels: awake ( CLASSIC : awake and restless ; STAGES : wake ) and asleep ( CLASSIC : asleep ; STAGES : deep , light , and rem ). In this section, there is a boolean flag named INCLUDE_ALL_GROUPS that if set to TRUE, computes LEVELS_AND_TYPES features grouping all levels together in a single all category. [SLEEP_TYPES] Types of sleep to be included in the feature extraction computation. There are three sleep types: main , nap , and all . The all type means both main sleep and naps are considered. 
Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS][LEVELS_AND_TYPES] : Feature Units Description countepisode [LEVEL][TYPE] episodes Number of [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. sumduration [LEVEL][TYPE] minutes Total duration of all [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. maxduration [LEVEL][TYPE] minutes Longest duration of any [LEVEL][TYPE] sleep episode. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. minduration [LEVEL][TYPE] minutes Shortest duration of any [LEVEL][TYPE] sleep episode. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. avgduration [LEVEL][TYPE] minutes Average duration of all [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. medianduration [LEVEL][TYPE] minutes Median duration of all [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. 
stdduration [LEVEL][TYPE] minutes Standard deviation duration of all [LEVEL][TYPE] sleep episodes. [LEVEL] is one of [SLEEP_LEVELS] (e.g. awake-classic or rem-stages) and [TYPE] is one of [SLEEP_TYPES] (e.g. main). [LEVEL] can also be all when INCLUDE_ALL_GROUPS is True, which ignores the levels and groups by sleep types. Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [ACROSS_LEVELS] : Feature Units Description ratiocount [LEVEL] - Ratio between the count of episodes of a single sleep [LEVEL] and the count of all episodes of all levels during both main and nap sleep types. This answers the question: what percentage of all wake , deep , light , and rem episodes were rem ? (e.g., \\(countepisode[remstages][all] / countepisode[all][all]\\) ) ratioduration [LEVEL] - Ratio between the duration of episodes of a single sleep [LEVEL] and the duration of all episodes of all levels during both main and nap sleep types. This answers the question: what percentage of all wake , deep , light , and rem time was rem ? (e.g., \\(sumduration[remstages][all] / sumduration[all][all]\\) ) Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [ACROSS_TYPES] : Feature Units Description ratiocountmain - Ratio between the count of all main episodes (independently of the levels inside) divided by the count of all main and nap episodes. This answers the question: what percentage of all sleep episodes ( main and nap ) were main ? We do not provide the ratio for nap because is complementary. ( \\(countepisode[all][main] / countepisode[all][all]\\) ) ratiodurationmain - Ratio between the duration of all main episodes (independently of the levels inside) divided by the duration of all main and nap episodes. This answers the question: what percentage of all sleep time ( main and nap ) was main ? We do not provide the ratio for nap because is complementary. 
( \\(sumduration[all][main] / sumduration[all][all]\\) ) Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [WITHIN_LEVELS] : Feature Units Description ratiocountmainwithin [LEVEL] - Ratio between the count of episodes of a single sleep [LEVEL] during main sleep divided by the count of episodes of a single sleep [LEVEL] during main and nap . This answers the question: are rem episodes more frequent during main than nap sleep? We do not provide the ratio for nap because is complementary. ( \\(countepisode[remstages][main] / countepisode[remstages][all]\\) ) ratiodurationmainwithin [LEVEL] - Ratio between the duration of episodes of a single sleep [LEVEL] during main sleep divided by the duration of episodes of a single sleep [LEVEL] during main and nap . This answers the question: is rem time more frequent during main than nap sleep? We do not provide the ratio for nap because is complementary. ( \\(countepisode[remstages][main] / countepisode[remstages][all]\\) ) Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][RAPIDS] RATIOS [WITHIN_TYPES] : Feature Units Description ratiocount [LEVEL] within [TYPE] - Ratio between the count of episodes of a single sleep [LEVEL] and the count of all episodes of all levels during either main or nap sleep types. This answers the question: what percentage of all wake , deep , light , and rem episodes were rem during main / nap sleep time? (e.g., \\(countepisode[remstages][main] / countepisode[all][main]\\) ) ratioduration [LEVEL] within [TYPE] - Ratio between the duration of episodes of a single sleep [LEVEL] and the duration of all episodes of all levels during either main or nap sleep types. This answers the question: what percentage of all wake , deep , light , and rem time was rem during main / nap sleep time? 
(e.g., \\(sumduration[remstages][main] / sumduration[all][main]\\) ) Assumptions/Observations This diagram will help you understand how sleep episodes are chunked and grouped within time segments for the RAPIDS provider. Features listed in [LEVELS_AND_TYPES] are computed for any levels and types listed in [SLEEP_LEVELS] or [SLEEP_TYPES] . For example if STAGES only contains [rem, light] you will not get countepisode[wake|deep][TYPE] or sum, max, min, avg, median, or std duration . Levels or types in these lists do not influence RATIOS or ROUTINE features. Any [LEVEL] grouping is done within the elements of each class CLASSIC , STAGES , and UNIFIED . That is, we never combine CLASSIC or STAGES types to compute features. The categories for all levels (when INCLUDE_ALL_GROUPS is True ) and all SLEEP_TYPES are not considered for RATIOS features as they are always 1. These features can be computed in time segments of any length, but only the 1-minute sleep chunks within each segment instance will be used.","title":"RAPIDS provider"},{"location":"features/fitbit-sleep-intraday/#price-provider","text":"Understanding PRICE features This diagram will help you understand how sleep episodes are chunked and grouped within time segments and LNE-LNE intervals for the PRICE provider. 
Available time segments Available for any time segments larger or equal to one day File Sequence - data/raw/ { pid } /fitbit_sleep_intraday_raw.csv - data/raw/ { pid } /fitbit_sleep_intraday_parsed.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes_resampled.csv - data/interim/ { pid } /fitbit_sleep_intraday_episodes_resampled_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_intraday_features/fitbit_sleep_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_intraday.csv Parameters description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][PRICE] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_INTRADAY features from the PRICE provider [FEATURES] Features to be computed from sleep intraday data, see table below [SLEEP_LEVELS] Fitbit\u2019s sleep API Version 1 only provides CLASSIC records. However, Version 1.2 provides 2 types of records: CLASSIC and STAGES . STAGES is only available in devices with a heart rate sensor and even those devices will fail to report it if the battery is low or the device is not tight enough. While CLASSIC contains 3 sleep levels ( awake , restless , and asleep ), STAGES contains 4 sleep levels ( wake , deep , light , rem ). To make it consistent, RAPIDS groups them into 2 UNIFIED sleep levels: awake ( CLASSIC : awake and restless ; STAGES : wake ) and asleep ( CLASSIC : asleep ; STAGES : deep , light , and rem ). In this section, there is a boolean flag named INCLUDE_ALL_GROUPS that if set to TRUE, computes avgdurationallmain [DAY_TYPE] features grouping all levels together in a single all category. [DAY_TYPE] The features of this provider can be computed using daily averages/standard deviations that were extracted on WEEKEND days only, WEEK days only, or ALL days [LAST_NIGHT_END] Only main sleep episodes that start within the LNE-LNE interval [ LAST_NIGHT_END , LAST_NIGHT_END + 23H 59M 59S] are taken into account to compute the features described below. 
[LAST_NIGHT_END] is a number ranging from 0 (midnight) to 1439 (23:59). Features description for [FITBIT_SLEEP_INTRADAY][PROVIDERS][PRICE] : Feature Units Description avgduration [LEVEL] main [DAY_TYPE] minutes Average duration of daily sleep chunks of a LEVEL . Use the DAY_TYPE flag to include daily durations from weekend days only, weekdays, or both. Use [LEVEL] to group all levels in a single all category. avgratioduration [LEVEL] withinmain [DAY_TYPE] - Average of the daily ratio between the duration of sleep chunks of a LEVEL and total duration of all main sleep episodes in a day. When INCLUDE_ALL_GROUPS is True the all LEVEL is ignored since this feature is always 1. Use the DAY_TYPE flag to include start times from weekend days only, weekdays, or both. avgstarttimeofepisodemain [DAY_TYPE] minutes Average of all start times of the first main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include start times from LNE-LNE intervals that start on weekend days only, weekdays, or both. avgendtimeofepisodemain [DAY_TYPE] minutes Average of all end times of the last main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both. avgmidpointofepisodemain [DAY_TYPE] minutes Average of all the differences between avgendtime... and avgstarttime.. in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both. stdstarttimeofepisodemain [DAY_TYPE] minutes Standard deviation of all start times of the first main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include start times from LNE-LNE intervals that start on weekend days only, weekdays, or both. 
stdendtimeofepisodemain [DAY_TYPE] minutes Standard deviation of all end times of the last main sleep episode within each LNE-LNE interval in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both. stdmidpointofepisodemain [DAY_TYPE] minutes Standard deviation of all the differences between avgendtime... and avgstarttime.. in a time segment. Use the DAY_TYPE flag to include end times from LNE-LNE intervals that start on weekend days only, weekdays, or both. socialjetlag minutes Difference in minutes between the avgmidpointofepisodemain of weekends and weekdays that belong to each time segment instance. If your time segment does not contain at least one week day and one weekend day this feature will be NA. rmssdmeanstarttimeofepisodemain minutes Square root of the mean squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s starttimeofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the mean of how someone\u2019s starttimeofepisodemain (bedtime) changed from night to night. rmssdmeanendtimeofepisodemain minutes Square root of the mean squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s endtimeofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the mean of how someone\u2019s endtimeofepisodemain (wake time) changed from night to night. rmssdmeanmidpointofepisodemain minutes Square root of the mean squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s midpointofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the mean of how someone\u2019s midpointofepisodemain (mid time between bedtime and wake time) changed from night to night. 
rmssdmedianstarttimeofepisodemain minutes Square root of the median squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s starttimeofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the median of how someone\u2019s starttimeofepisodemain (bedtime) changed from night to night. rmssdmedianendtimeofepisodemain minutes Square root of the median squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s endtimeofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the median of how someone\u2019s endtimeofepisodemain (wake time) changed from night to night. rmssdmedianmidpointofepisodemain minutes Square root of the median squared successive difference (RMSSD) between today\u2019s and yesterday\u2019s midpointofepisodemain values across the entire participant\u2019s sleep data grouped per time segment instance. It represents the median of how someone\u2019s midpointofepisodemain (average mid time between bedtime and wake time) changed from night to night. Assumptions/Observations This diagram will help you understand how sleep episodes are chunked and grouped within time segments and LNE-LNE intervals for the PRICE provider. We recommend you use periodic segments that start in the morning so RAPIDS can chunk and group sleep episodes overnight. Shifted segments (as any other segments) are labelled based on their start and end date times. avgstarttime... and avgendtime... are roughly equivalent to an average bed and awake time only if you are using shifted segments. The features of this provider are only available on time segments that are longer than 24 hours because they are based on descriptive statistics computed across daily values. Even though Fitbit provides 2 types of sleep episodes ( main and nap ), only main sleep episodes are considered. 
The reference point for all times is 00:00 of the first day in the LNE-LNE interval. Sleep episodes are formed by 1-minute chunks that we group overnight starting from today\u2019s LNE and ending on tomorrow\u2019s LNE or the end of that segment (whatever is first). The features avgstarttime... and avgendtime... are the average of the first and last sleep episode across every LNE-LNE interval within a segment ( avgmidtime... is the mid point between start and end). Therefore, only segments longer than 24hrs will be averaged across more than one LNE-LNE interval. socialjetlag is only available on segment instances equal or longer than 48hrs that contain at least one weekday day and one weekend day, for example seven-day (weekly) segments.","title":"PRICE provider"},{"location":"features/fitbit-sleep-summary/","text":"Fitbit Sleep Summary \u00b6 Sensor parameters description for [FITBIT_SLEEP_SUMMARY] : Key Description [CONTAINER] Container where your sleep summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc. RAPIDS provider \u00b6 Understanding RAPIDS features This diagram will help you understand how sleep episodes are chunked and grouped within time segments using SLEEP_SUMMARY_LAST_NIGHT_END for the RAPIDS provider. Available time segments Only available for segments that span 1 or more complete days (e.g. Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_sleep_summary_raw.csv - data/raw/ { pid } /fitbit_sleep_summary_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_summary_features/fitbit_sleep_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_summary.csv Parameters description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_SUMMARY features from the RAPIDS provider [SLEEP_TYPES] Types of sleep to be included in the feature extraction computation. 
There are three sleep types: main , nap , and all . The all type means both main sleep and naps are considered. [FEATURES] Features to be computed from sleep summary data, see table below [FITBIT_DATA_STREAMS][data stream][SLEEP_SUMMARY_LAST_NIGHT_END] As an exception, the LAST_NIGHT_END parameter for this provider is in the data stream configuration section. This parameter controls how sleep episodes are assigned to different days and affects wake and bedtimes. Features description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description firstwaketimeTYPE minutes First wake time for a certain sleep type during a time segment. Wake time is number of minutes after midnight of a sleep episode\u2019s end time. lastwaketimeTYPE minutes Last wake time for a certain sleep type during a time segment. Wake time is number of minutes after midnight of a sleep episode\u2019s end time. firstbedtimeTYPE minutes First bedtime for a certain sleep type during a time segment. Bedtime is number of minutes after midnight of a sleep episode\u2019s start time. lastbedtimeTYPE minutes Last bedtime for a certain sleep type during a time segment. Bedtime is number of minutes after midnight of a sleep episode\u2019s start time. countepisodeTYPE episodes Number of sleep episodes for a certain sleep type during a time segment. avgefficiencyTYPE scores Average sleep efficiency for a certain sleep type during a time segment. sumdurationafterwakeupTYPE minutes Total duration the user stayed in bed after waking up for a certain sleep type during a time segment. sumdurationasleepTYPE minutes Total sleep duration for a certain sleep type during a time segment. sumdurationawakeTYPE minutes Total duration the user stayed awake but still in bed for a certain sleep type during a time segment. sumdurationtofallasleepTYPE minutes Total duration the user spent to fall asleep for a certain sleep type during a time segment. 
sumdurationinbedTYPE minutes Total duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. avgdurationafterwakeupTYPE minutes Average duration the user stayed in bed after waking up for a certain sleep type during a time segment. avgdurationasleepTYPE minutes Average sleep duration for a certain sleep type during a time segment. avgdurationawakeTYPE minutes Average duration the user stayed awake but still in bed for a certain sleep type during a time segment. avgdurationtofallasleepTYPE minutes Average duration the user spent to fall asleep for a certain sleep type during a time segment. avgdurationinbedTYPE minutes Average duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. Assumptions/Observations This diagram will help you understand how sleep episodes are chunked and grouped within time segments using LNE for the RAPIDS provider. There are three sleep types (TYPE): main , nap , all . The all type groups both main sleep and naps . All types are based on Fitbit\u2019s labels. There are two versions of Fitbit\u2019s sleep API ( version 1 and version 1.2 ), and each provides raw sleep data in a different format: Count & duration summaries . v1 contains count_awake , duration_awake , count_awakenings , count_restless , and duration_restless fields for every sleep record but v1.2 does not. API columns . Most features are computed based on the values provided by Fitbit\u2019s API: efficiency , minutes_after_wakeup , minutes_asleep , minutes_awake , minutes_to_fall_asleep , minutes_in_bed , is_main_sleep and type . 
Bed time and sleep duration are based on episodes that started between today\u2019s LNE and tomorrow\u2019s LNE while awake time is based on the episodes that started between yesterday\u2019s LNE and today\u2019s LNE The reference point for bed/awake times is today\u2019s 00:00. You can have bedtimes larger than 24 and awake times smaller than 0 These features are only available for time segments that span midnight to midnight of the same or different day. We include first and last wake and bedtimes because, when LAST_NIGHT_END is 10 am, the first bedtime could match a nap at 2 pm, and the last bedtime could match a main overnight sleep episode that starts at 10pm. Set the value for SLEEP_SUMMARY_LAST_NIGHT_END int the config parameter [FITBIT_DATA_STREAMS][data stream][SLEEP_SUMMARY_LAST_NIGHT_END].","title":"Fitbit Sleep Summary"},{"location":"features/fitbit-sleep-summary/#fitbit-sleep-summary","text":"Sensor parameters description for [FITBIT_SLEEP_SUMMARY] : Key Description [CONTAINER] Container where your sleep summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.","title":"Fitbit Sleep Summary"},{"location":"features/fitbit-sleep-summary/#rapids-provider","text":"Understanding RAPIDS features This diagram will help you understand how sleep episodes are chunked and grouped within time segments using SLEEP_SUMMARY_LAST_NIGHT_END for the RAPIDS provider. Available time segments Only available for segments that span 1 or more complete days (e.g. 
Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_sleep_summary_raw.csv - data/raw/ { pid } /fitbit_sleep_summary_with_datetime.csv - data/interim/ { pid } /fitbit_sleep_summary_features/fitbit_sleep_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_sleep_summary.csv Parameters description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_SLEEP_SUMMARY features from the RAPIDS provider [SLEEP_TYPES] Types of sleep to be included in the feature extraction computation. There are three sleep types: main , nap , and all . The all type means both main sleep and naps are considered. [FEATURES] Features to be computed from sleep summary data, see table below [FITBIT_DATA_STREAMS][data stream][SLEEP_SUMMARY_LAST_NIGHT_END] As an exception, the LAST_NIGHT_END parameter for this provider is in the data stream configuration section. This parameter controls how sleep episodes are assigned to different days and affects wake and bedtimes. Features description for [FITBIT_SLEEP_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description firstwaketimeTYPE minutes First wake time for a certain sleep type during a time segment. Wake time is number of minutes after midnight of a sleep episode\u2019s end time. lastwaketimeTYPE minutes Last wake time for a certain sleep type during a time segment. Wake time is number of minutes after midnight of a sleep episode\u2019s end time. firstbedtimeTYPE minutes First bedtime for a certain sleep type during a time segment. Bedtime is number of minutes after midnight of a sleep episode\u2019s start time. lastbedtimeTYPE minutes Last bedtime for a certain sleep type during a time segment. Bedtime is number of minutes after midnight of a sleep episode\u2019s start time. countepisodeTYPE episodes Number of sleep episodes for a certain sleep type during a time segment. 
avgefficiencyTYPE scores Average sleep efficiency for a certain sleep type during a time segment. sumdurationafterwakeupTYPE minutes Total duration the user stayed in bed after waking up for a certain sleep type during a time segment. sumdurationasleepTYPE minutes Total sleep duration for a certain sleep type during a time segment. sumdurationawakeTYPE minutes Total duration the user stayed awake but still in bed for a certain sleep type during a time segment. sumdurationtofallasleepTYPE minutes Total duration the user spent to fall asleep for a certain sleep type during a time segment. sumdurationinbedTYPE minutes Total duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. avgdurationafterwakeupTYPE minutes Average duration the user stayed in bed after waking up for a certain sleep type during a time segment. avgdurationasleepTYPE minutes Average sleep duration for a certain sleep type during a time segment. avgdurationawakeTYPE minutes Average duration the user stayed awake but still in bed for a certain sleep type during a time segment. avgdurationtofallasleepTYPE minutes Average duration the user spent to fall asleep for a certain sleep type during a time segment. avgdurationinbedTYPE minutes Average duration the user stayed in bed (sumdurationtofallasleep + sumdurationawake + sumdurationasleep + sumdurationafterwakeup) for a certain sleep type during a time segment. Assumptions/Observations This diagram will help you understand how sleep episodes are chunked and grouped within time segments using LNE for the RAPIDS provider. There are three sleep types (TYPE): main , nap , all . The all type groups both main sleep and naps . All types are based on Fitbit\u2019s labels. There are two versions of Fitbit\u2019s sleep API ( version 1 and version 1.2 ), and each provides raw sleep data in a different format: Count & duration summaries . 
v1 contains count_awake , duration_awake , count_awakenings , count_restless , and duration_restless fields for every sleep record but v1.2 does not. API columns . Most features are computed based on the values provided by Fitbit\u2019s API: efficiency , minutes_after_wakeup , minutes_asleep , minutes_awake , minutes_to_fall_asleep , minutes_in_bed , is_main_sleep and type . Bed time and sleep duration are based on episodes that started between today\u2019s LNE and tomorrow\u2019s LNE while awake time is based on the episodes that started between yesterday\u2019s LNE and today\u2019s LNE The reference point for bed/awake times is today\u2019s 00:00. You can have bedtimes larger than 24 and awake times smaller than 0 These features are only available for time segments that span midnight to midnight of the same or different day. We include first and last wake and bedtimes because, when LAST_NIGHT_END is 10 am, the first bedtime could match a nap at 2 pm, and the last bedtime could match a main overnight sleep episode that starts at 10pm. Set the value for SLEEP_SUMMARY_LAST_NIGHT_END int the config parameter [FITBIT_DATA_STREAMS][data stream][SLEEP_SUMMARY_LAST_NIGHT_END].","title":"RAPIDS provider"},{"location":"features/fitbit-steps-intraday/","text":"Fitbit Steps Intraday \u00b6 Sensor parameters description for [FITBIT_STEPS_INTRADAY] : Key Description [CONTAINER] Container where your steps intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc. [EXCLUDE_SLEEP] Step data will be excluded if it was logged during sleep periods when at least one [EXCLUDE] flag is set to True . Sleep can be delimited by (1) a fixed period that repeats on every day if [TIME_BASED][EXCLUDE] is True or (2) by Fitbit summary sleep episodes if [FITBIT_BASED][EXCLUDE] is True. If both are True (3), we use all Fitbit sleep episodes as well as the time-based episodes that do not overlap with any Fitbit episodes. 
If [TIME_BASED][EXCLUDE] is True, make sure Fitbit sleep summary container points to a valid table or file. RAPIDS provider \u00b6 Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_steps_intraday_raw.csv - data/raw/ { pid } /fitbit_steps_intraday_with_datetime.csv - data/raw/ { pid } /fitbit_sleep_summary_raw.csv ( Only when [ EXCLUDE_SLEEP ][ EXCLUDE ]= True and [ EXCLUDE_SLEEP ][ TYPE ]= FITBIT_BASED ) - data/interim/ { pid } /fitbit_steps_intraday_with_datetime_exclude_sleep.csv ( Only when [ EXCLUDE_SLEEP ][ EXCLUDE ]= True ) - data/interim/ { pid } /fitbit_steps_intraday_features/fitbit_steps_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_intraday.csv Parameters description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from steps intraday data, see table below [THRESHOLD_ACTIVE_BOUT] Every minute with Fitbit steps data wil be labelled as sedentary if its step count is below this threshold, otherwise, active . [INCLUDE_ZERO_STEP_ROWS] Whether or not to include time segments with a 0 step count during the whole day. Features description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description sumsteps steps The total step count during a time segment. maxsteps steps The maximum step count during a time segment. minsteps steps The minimum step count during a time segment. avgsteps steps The average step count during a time segment. stdsteps steps The standard deviation of step count during a time segment. countepisodesedentarybout bouts Number of sedentary bouts during a time segment. sumdurationsedentarybout minutes Total duration of all sedentary bouts during a time segment. maxdurationsedentarybout minutes The maximum duration of any sedentary bout during a time segment. 
mindurationsedentarybout minutes The minimum duration of any sedentary bout during a time segment. avgdurationsedentarybout minutes The average duration of sedentary bouts during a time segment. stddurationsedentarybout minutes The standard deviation of the duration of sedentary bouts during a time segment. countepisodeactivebout bouts Number of active bouts during a time segment. sumdurationactivebout minutes Total duration of all active bouts during a time segment. maxdurationactivebout minutes The maximum duration of any active bout during a time segment. mindurationactivebout minutes The minimum duration of any active bout during a time segment. avgdurationactivebout minutes The average duration of active bouts during a time segment. stddurationactivebout minutes The standard deviation of the duration of active bouts during a time segment. Assumptions/Observations Active and sedentary bouts . If the step count per minute is smaller than THRESHOLD_ACTIVE_BOUT (default value is 10), that minute is labelled as sedentary, otherwise, is labelled as active. Active and sedentary bouts are periods of consecutive minutes labelled as active or sedentary .","title":"Fitbit Steps Intraday"},{"location":"features/fitbit-steps-intraday/#fitbit-steps-intraday","text":"Sensor parameters description for [FITBIT_STEPS_INTRADAY] : Key Description [CONTAINER] Container where your steps intraday data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc. [EXCLUDE_SLEEP] Step data will be excluded if it was logged during sleep periods when at least one [EXCLUDE] flag is set to True . Sleep can be delimited by (1) a fixed period that repeats on every day if [TIME_BASED][EXCLUDE] is True or (2) by Fitbit summary sleep episodes if [FITBIT_BASED][EXCLUDE] is True. If both are True (3), we use all Fitbit sleep episodes as well as the time-based episodes that do not overlap with any Fitbit episodes. 
If [TIME_BASED][EXCLUDE] is True, make sure Fitbit sleep summary container points to a valid table or file.","title":"Fitbit Steps Intraday"},{"location":"features/fitbit-steps-intraday/#rapids-provider","text":"Available time segments Available for all time segments File Sequence - data/raw/ { pid } /fitbit_steps_intraday_raw.csv - data/raw/ { pid } /fitbit_steps_intraday_with_datetime.csv - data/raw/ { pid } /fitbit_sleep_summary_raw.csv ( Only when [ EXCLUDE_SLEEP ][ EXCLUDE ]= True and [ EXCLUDE_SLEEP ][ TYPE ]= FITBIT_BASED ) - data/interim/ { pid } /fitbit_steps_intraday_with_datetime_exclude_sleep.csv ( Only when [ EXCLUDE_SLEEP ][ EXCLUDE ]= True ) - data/interim/ { pid } /fitbit_steps_intraday_features/fitbit_steps_intraday_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_intraday.csv Parameters description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_INTRADAY features from the RAPIDS provider [FEATURES] Features to be computed from steps intraday data, see table below [THRESHOLD_ACTIVE_BOUT] Every minute with Fitbit steps data wil be labelled as sedentary if its step count is below this threshold, otherwise, active . [INCLUDE_ZERO_STEP_ROWS] Whether or not to include time segments with a 0 step count during the whole day. Features description for [FITBIT_STEPS_INTRADAY][PROVIDERS][RAPIDS] : Feature Units Description sumsteps steps The total step count during a time segment. maxsteps steps The maximum step count during a time segment. minsteps steps The minimum step count during a time segment. avgsteps steps The average step count during a time segment. stdsteps steps The standard deviation of step count during a time segment. countepisodesedentarybout bouts Number of sedentary bouts during a time segment. sumdurationsedentarybout minutes Total duration of all sedentary bouts during a time segment. 
maxdurationsedentarybout minutes The maximum duration of any sedentary bout during a time segment. mindurationsedentarybout minutes The minimum duration of any sedentary bout during a time segment. avgdurationsedentarybout minutes The average duration of sedentary bouts during a time segment. stddurationsedentarybout minutes The standard deviation of the duration of sedentary bouts during a time segment. countepisodeactivebout bouts Number of active bouts during a time segment. sumdurationactivebout minutes Total duration of all active bouts during a time segment. maxdurationactivebout minutes The maximum duration of any active bout during a time segment. mindurationactivebout minutes The minimum duration of any active bout during a time segment. avgdurationactivebout minutes The average duration of active bouts during a time segment. stddurationactivebout minutes The standard deviation of the duration of active bouts during a time segment. Assumptions/Observations Active and sedentary bouts . If the step count per minute is smaller than THRESHOLD_ACTIVE_BOUT (default value is 10), that minute is labelled as sedentary, otherwise, is labelled as active. Active and sedentary bouts are periods of consecutive minutes labelled as active or sedentary .","title":"RAPIDS provider"},{"location":"features/fitbit-steps-summary/","text":"Fitbit Steps Summary \u00b6 Sensor parameters description for [FITBIT_STEPS_SUMMARY] : Key Description [CONTAINER] Container where your steps summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc. RAPIDS provider \u00b6 Available time segments Only available for segments that span 1 or more complete days (e.g. 
Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_steps_summary_raw.csv - data/raw/ { pid } /fitbit_steps_summary_with_datetime.csv - data/interim/ { pid } /fitbit_steps_summary_features/fitbit_steps_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_summary.csv Parameters description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from steps summary data, see table below Features description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxsumsteps steps The maximum daily step count during a time segment. minsumsteps steps The minimum daily step count during a time segment. avgsumsteps steps The average daily step count during a time segment. mediansumsteps steps The median of daily step count during a time segment. stdsumsteps steps The standard deviation of daily step count during a time segment. Assumptions/Observations NA","title":"Fitbit Steps Summary"},{"location":"features/fitbit-steps-summary/#fitbit-steps-summary","text":"Sensor parameters description for [FITBIT_STEPS_SUMMARY] : Key Description [CONTAINER] Container where your steps summary data is stored, depending on the data stream you are using this can be a database table, a CSV file, etc.","title":"Fitbit Steps Summary"},{"location":"features/fitbit-steps-summary/#rapids-provider","text":"Available time segments Only available for segments that span 1 or more complete days (e.g. 
Jan 1 st 00:00 to Jan 3 rd 23:59) File Sequence - data/raw/ { pid } /fitbit_steps_summary_raw.csv - data/raw/ { pid } /fitbit_steps_summary_with_datetime.csv - data/interim/ { pid } /fitbit_steps_summary_features/fitbit_steps_summary_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /fitbit_steps_summary.csv Parameters description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract FITBIT_STEPS_SUMMARY features from the RAPIDS provider [FEATURES] Features to be computed from steps summary data, see table below Features description for [FITBIT_STEPS_SUMMARY][PROVIDERS][RAPIDS] : Feature Units Description maxsumsteps steps The maximum daily step count during a time segment. minsumsteps steps The minimum daily step count during a time segment. avgsumsteps steps The average daily step count during a time segment. mediansumsteps steps The median of daily step count during a time segment. stdsumsteps steps The standard deviation of daily step count during a time segment. Assumptions/Observations NA","title":"RAPIDS provider"},{"location":"features/phone-accelerometer/","text":"Phone Accelerometer \u00b6 Sensor parameters description for [PHONE_ACCELEROMETER] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the accelerometer data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Feature Units Description maxmagnitude m/s 2 The maximum magnitude of acceleration ( \\(\\|acceleration\\| = \\sqrt{x^2 + y^2 + z^2}\\) ). minmagnitude m/s 2 The minimum magnitude of acceleration. avgmagnitude m/s 2 The average magnitude of acceleration. medianmagnitude m/s 2 The median magnitude of acceleration. stdmagnitude m/s 2 The standard deviation of acceleration. Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes is likely because the accelerometer dataset for a participant is to big to fit in memory. We are considering different alternatives to overcome this problem. PANDA provider \u00b6 These features are based on the work by Panda et al . 
Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the PANDA provider [FEATURES] Features to be computed for exertional and non-exertional activity episodes, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Feature Units Description sumduration minutes Total duration of all exertional or non-exertional activity episodes. maxduration minutes Longest duration of any exertional or non-exertional activity episode. minduration minutes Shortest duration of any exertional or non-exertional activity episode. avgduration minutes Average duration of any exertional or non-exertional activity episode. medianduration minutes Median duration of any exertional or non-exertional activity episode. stdduration minutes Standard deviation of the duration of all exertional or non-exertional activity episodes. Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes is likely because the accelerometer dataset for a participant is to big to fit in memory. We are considering different alternatives to overcome this problem. See Panda et al for a definition of exertional and non-exertional activity episodes","title":"Phone Accelerometer"},{"location":"features/phone-accelerometer/#phone-accelerometer","text":"Sensor parameters description for [PHONE_ACCELEROMETER] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the accelerometer data is stored","title":"Phone Accelerometer"},{"location":"features/phone-accelerometer/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][RAPIDS] : Feature Units Description maxmagnitude m/s 2 The maximum magnitude of acceleration ( \\(\\|acceleration\\| = \\sqrt{x^2 + y^2 + z^2}\\) ). minmagnitude m/s 2 The minimum magnitude of acceleration. avgmagnitude m/s 2 The average magnitude of acceleration. medianmagnitude m/s 2 The median magnitude of acceleration. stdmagnitude m/s 2 The standard deviation of acceleration. Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes is likely because the accelerometer dataset for a participant is to big to fit in memory. We are considering different alternatives to overcome this problem.","title":"RAPIDS provider"},{"location":"features/phone-accelerometer/#panda-provider","text":"These features are based on the work by Panda et al . 
Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_accelerometer_raw.csv - data/raw/ { pid } /phone_accelerometer_with_datetime.csv - data/interim/ { pid } /phone_accelerometer_features/phone_accelerometer_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_accelerometer.csv Parameters description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Key Description [COMPUTE] Set to True to extract PHONE_ACCELEROMETER features from the PANDA provider [FEATURES] Features to be computed for exertional and non-exertional activity episodes, see table below Features description for [PHONE_ACCELEROMETER][PROVIDERS][PANDA] : Feature Units Description sumduration minutes Total duration of all exertional or non-exertional activity episodes. maxduration minutes Longest duration of any exertional or non-exertional activity episode. minduration minutes Shortest duration of any exertional or non-exertional activity episode. avgduration minutes Average duration of any exertional or non-exertional activity episode. medianduration minutes Median duration of any exertional or non-exertional activity episode. stdduration minutes Standard deviation of the duration of all exertional or non-exertional activity episodes. Assumptions/Observations Analyzing accelerometer data is a memory intensive task. If RAPIDS crashes is likely because the accelerometer dataset for a participant is to big to fit in memory. We are considering different alternatives to overcome this problem. See Panda et al for a definition of exertional and non-exertional activity episodes","title":"PANDA provider"},{"location":"features/phone-activity-recognition/","text":"Phone Activity Recognition \u00b6 Sensor parameters description for [PHONE_ACTIVITY_RECOGNITION] : Key Description [CONTAINER][ANDROID] Data stream container (database table, CSV file, etc.) 
where the activity data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [CONTAINER][IOS] Data stream container (database table, CSV file, etc.) where the activity data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same activity episode RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_activity_recognition_raw.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime.csv - data/interim/ { pid } /phone_activity_recognition_episodes.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_activity_recognition_features/phone_activity_recognition_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_activity_recognition.csv Parameters description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACTIVITY_RECOGNITION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [ACTIVITY_CLASSES][STATIONARY] An array of the activity labels to be considered in the STATIONARY category choose any of still , tilting [ACTIVITY_CLASSES][MOBILE] An array of the activity labels to be considered in the MOBILE category choose any of on_foot , walking , running , on_bicycle [ACTIVITY_CLASSES][VEHICLE] An array of the activity labels to be considered in the VEHICLE category choose any of in_vehicule Features description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of episodes. 
mostcommonactivity activity type The most common activity type (e.g. still , on_foot , etc.). If there is a tie, the first one is chosen. countuniqueactivities activity type Number of unique activities. durationstationary minutes The total duration of [ACTIVITY_CLASSES][STATIONARY] episodes durationmobile minutes The total duration of [ACTIVITY_CLASSES][MOBILE] episodes of on foot, running, and on bicycle activities durationvehicle minutes The total duration of [ACTIVITY_CLASSES][VEHICLE] episodes of on vehicle activity Assumptions/Observations iOS Activity Recognition names and types are unified with Android labels: iOS Activity Name Android Activity Name Android Activity Type walking walking 7 running running 8 cycling on_bicycle 1 automotive in_vehicle 0 stationary still 3 unknown unknown 4 In AWARE, Activity Recognition data for Android and iOS are stored in two different database tables, RAPIDS automatically infers what platform each participant belongs to based on their participant file .","title":"Phone Activity Recognition"},{"location":"features/phone-activity-recognition/#phone-activity-recognition","text":"Sensor parameters description for [PHONE_ACTIVITY_RECOGNITION] : Key Description [CONTAINER][ANDROID] Data stream container (database table, CSV file, etc.) where the activity data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [CONTAINER][IOS] Data stream container (database table, CSV file, etc.) 
where the activity data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same activity episode","title":"Phone Activity Recognition"},{"location":"features/phone-activity-recognition/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_activity_recognition_raw.csv - data/raw/ { pid } /phone_activity_recognition_with_datetime.csv - data/interim/ { pid } /phone_activity_recognition_episodes.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled.csv - data/interim/ { pid } /phone_activity_recognition_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_activity_recognition_features/phone_activity_recognition_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_activity_recognition.csv Parameters description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_ACTIVITY_RECOGNITION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [ACTIVITY_CLASSES][STATIONARY] An array of the activity labels to be considered in the STATIONARY category choose any of still , tilting [ACTIVITY_CLASSES][MOBILE] An array of the activity labels to be considered in the MOBILE category choose any of on_foot , walking , running , on_bicycle [ACTIVITY_CLASSES][VEHICLE] An array of the activity labels to be considered in the VEHICLE category choose any of in_vehicule Features description for [PHONE_ACTIVITY_RECOGNITION][PROVIDERS][RAPIDS] : Feature Units Description count rows Number of episodes. mostcommonactivity activity type The most common activity type (e.g. still , on_foot , etc.). If there is a tie, the first one is chosen. 
countuniqueactivities activity type Number of unique activities. durationstationary minutes The total duration of [ACTIVITY_CLASSES][STATIONARY] episodes durationmobile minutes The total duration of [ACTIVITY_CLASSES][MOBILE] episodes of on foot, running, and on bicycle activities durationvehicle minutes The total duration of [ACTIVITY_CLASSES][VEHICLE] episodes of on vehicle activity Assumptions/Observations iOS Activity Recognition names and types are unified with Android labels: iOS Activity Name Android Activity Name Android Activity Type walking walking 7 running running 8 cycling on_bicycle 1 automotive in_vehicle 0 stationary still 3 unknown unknown 4 In AWARE, Activity Recognition data for Android and iOS are stored in two different database tables, RAPIDS automatically infers what platform each participant belongs to based on their participant file .","title":"RAPIDS provider"},{"location":"features/phone-applications-crashes/","text":"Phone Applications Crashes \u00b6 Sensor parameters description for [PHONE_APPLICATIONS_CRASHES] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the applications crashes data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scrapped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] if [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] , if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . 
If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored) Note No feature providers have been implemented for this sensor yet, however you can use its key ( PHONE_APPLICATIONS_CRASHES ) to improve PHONE_DATA_YIELD or you can implement your own features .","title":"Phone Applications Crashes"},{"location":"features/phone-applications-crashes/#phone-applications-crashes","text":"Sensor parameters description for [PHONE_APPLICATIONS_CRASHES] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the applications crashes data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scrapped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] if [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] , if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . 
If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored) Note No feature providers have been implemented for this sensor yet, however you can use its key ( PHONE_APPLICATIONS_CRASHES ) to improve PHONE_DATA_YIELD or you can implement your own features .","title":"Phone Applications Crashes"},{"location":"features/phone-applications-foreground/","text":"Phone Applications Foreground \u00b6 Sensor parameters description for [PHONE_APPLICATIONS_FOREGROUND] (these parameters are used by the only provider available at the moment, RAPIDS): Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the applications foreground data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scrapped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] if [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] , if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored) RAPIDS provider \u00b6 The app category (genre) catalogue used in these features was originally created by Stachl et al . 
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_applications_foreground_raw.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime_with_categories.csv - data/interim/ { pid } /phone_applications_foreground_features/phone_applications_foreground_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_applications_foreground.csv Parameters description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_APPLICATIONS_FOREGROUND features from the RAPIDS provider [FEATURES] Features to be computed, see table below [SINGLE_CATEGORIES] An array of app categories to be included in the feature extraction computation. The special keyword all represents a category with all the apps from each participant. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [MULTIPLE_CATEGORIES] An array of collections representing meta-categories (a group of categories). They key of each element is the name of the meta-category and the value is an array of member app categories. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [SINGLE_APPS] An array of apps to be included in the feature extraction computation. Use their package name (e.g. com.google.android.youtube ) or the reserved keyword top1global (the most used app by a participant over the whole monitoring study) [EXCLUDED_CATEGORIES] An array of app categories to be excluded from the feature extraction computation. 
By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [EXCLUDED_APPS] An array of apps to be excluded from the feature extraction computation. Use their package name, for example: com.google.android.youtube Features description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Feature Units Description count apps Number of times a single app or apps within a category were used (i.e. they were brought to the foreground either by tapping their icon or switching to it from another app) timeoffirstuse minutes The time in minutes between 12:00am (midnight) and the first use of a single app or apps within a category during a time_segment timeoflastuse minutes The time in minutes between 12:00am (midnight) and the last use of a single app or apps within a category during a time_segment frequencyentropy nats The entropy of the used apps within a category during a time_segment (each app is seen as a unique event, the more apps were used, the higher the entropy). This is especially relevant when computed over all apps. Entropy cannot be obtained for a single app Assumptions/Observations Features can be computed by app, by apps grouped under a single category (genre) and by multiple categories grouped together (meta-categories). For example, we can get features for Facebook (single app), for Social Network apps (a category including Facebook and other social media apps) or for Social (a meta-category formed by Social Network and Social Media Tools categories). Apps installed by default like YouTube are considered systems apps on some phones. We do an exact match to exclude apps where \u201cgenre\u201d == EXCLUDED_CATEGORIES or \u201cpackage_name\u201d == EXCLUDED_APPS . We provide three ways of classifying and app within a category (genre): a) by automatically scraping its official category from the Google Play Store, b) by using the catalogue created by Stachl et al. 
which we provide in RAPIDS ( data/external/stachl_application_genre_catalogue.csv ), or c) by manually creating a personalized catalogue. You can choose a, b or c by modifying [APPLICATION_GENRES] keys and values (see the Sensor parameters description table above).","title":"Phone Applications Foreground"},{"location":"features/phone-applications-foreground/#phone-applications-foreground","text":"Sensor parameters description for [PHONE_APPLICATIONS_FOREGROUND] (these parameters are used by the only provider available at the moment, RAPIDS): Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the applications foreground data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scrapped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] if [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] , if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored)","title":"Phone Applications Foreground"},{"location":"features/phone-applications-foreground/#rapids-provider","text":"The app category (genre) catalogue used in these features was originally created by Stachl et al . 
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_applications_foreground_raw.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime.csv - data/raw/ { pid } /phone_applications_foreground_with_datetime_with_categories.csv - data/interim/ { pid } /phone_applications_foreground_features/phone_applications_foreground_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_applications_foreground.csv Parameters description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_APPLICATIONS_FOREGROUND features from the RAPIDS provider [FEATURES] Features to be computed, see table below [SINGLE_CATEGORIES] An array of app categories to be included in the feature extraction computation. The special keyword all represents a category with all the apps from each participant. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [MULTIPLE_CATEGORIES] An array of collections representing meta-categories (a group of categories). They key of each element is the name of the meta-category and the value is an array of member app categories. By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [SINGLE_APPS] An array of apps to be included in the feature extraction computation. Use their package name (e.g. com.google.android.youtube ) or the reserved keyword top1global (the most used app by a participant over the whole monitoring study) [EXCLUDED_CATEGORIES] An array of app categories to be excluded from the feature extraction computation. 
By default we use the category catalogue pointed by [APPLICATION_CATEGORIES][CATALOGUE_FILE] (see the Sensor parameters description table above) [EXCLUDED_APPS] An array of apps to be excluded from the feature extraction computation. Use their package name, for example: com.google.android.youtube Features description for [PHONE_APPLICATIONS_FOREGROUND][PROVIDERS][RAPIDS] : Feature Units Description count apps Number of times a single app or apps within a category were used (i.e. they were brought to the foreground either by tapping their icon or switching to it from another app) timeoffirstuse minutes The time in minutes between 12:00am (midnight) and the first use of a single app or apps within a category during a time_segment timeoflastuse minutes The time in minutes between 12:00am (midnight) and the last use of a single app or apps within a category during a time_segment frequencyentropy nats The entropy of the used apps within a category during a time_segment (each app is seen as a unique event, the more apps were used, the higher the entropy). This is especially relevant when computed over all apps. Entropy cannot be obtained for a single app Assumptions/Observations Features can be computed by app, by apps grouped under a single category (genre) and by multiple categories grouped together (meta-categories). For example, we can get features for Facebook (single app), for Social Network apps (a category including Facebook and other social media apps) or for Social (a meta-category formed by Social Network and Social Media Tools categories). Apps installed by default like YouTube are considered systems apps on some phones. We do an exact match to exclude apps where \u201cgenre\u201d == EXCLUDED_CATEGORIES or \u201cpackage_name\u201d == EXCLUDED_APPS . We provide three ways of classifying and app within a category (genre): a) by automatically scraping its official category from the Google Play Store, b) by using the catalogue created by Stachl et al. 
which we provide in RAPIDS ( data/external/stachl_application_genre_catalogue.csv ), or c) by manually creating a personalized catalogue. You can choose a, b or c by modifying [APPLICATION_GENRES] keys and values (see the Sensor parameters description table above).","title":"RAPIDS provider"},{"location":"features/phone-applications-notifications/","text":"Phone Applications Notifications \u00b6 Sensor parameters description for [PHONE_APPLICATIONS_NOTIFICATIONS] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the applications notifications data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scrapped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] if [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] , if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored) Note No feature providers have been implemented for this sensor yet, however you can use its key ( PHONE_APPLICATIONS_NOTIFICATIONS ) to improve PHONE_DATA_YIELD or you can implement your own features .","title":"Phone Applications Notifications"},{"location":"features/phone-applications-notifications/#phone-applications-notifications","text":"Sensor parameters description for [PHONE_APPLICATIONS_NOTIFICATIONS] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the applications notifications data is stored [APPLICATION_CATEGORIES][CATALOGUE_SOURCE] FILE or GOOGLE . If FILE , app categories (genres) are read from [CATALOGUE_FILE] . If [GOOGLE] , app categories (genres) are scrapped from the Play Store [APPLICATION_CATEGORIES][CATALOGUE_FILE] CSV file with a package_name and genre column. By default we provide the catalogue created by Stachl et al in data/external/stachl_application_genre_catalogue.csv [APPLICATION_CATEGORIES][UPDATE_CATALOGUE_FILE] if [CATALOGUE_SOURCE] is equal to FILE , this flag signals whether or not to update [CATALOGUE_FILE] , if [CATALOGUE_SOURCE] is equal to GOOGLE all scraped genres will be saved to [CATALOGUE_FILE] [APPLICATION_CATEGORIES][SCRAPE_MISSING_CATEGORIES] This flag signals whether or not to scrape categories (genres) missing from the [CATALOGUE_FILE] . If [CATALOGUE_SOURCE] is equal to GOOGLE , all genres are scraped anyway (this flag is ignored) Note No feature providers have been implemented for this sensor yet, however you can use its key ( PHONE_APPLICATIONS_NOTIFICATIONS ) to improve PHONE_DATA_YIELD or you can implement your own features .","title":"Phone Applications Notifications"},{"location":"features/phone-battery/","text":"Phone Battery \u00b6 Sensor parameters description for [PHONE_BATTERY] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the battery data is stored [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same battery charge or discharge episode RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_battery_raw.csv - data/interim/ { pid } /phone_battery_episodes.csv - data/interim/ { pid } /phone_battery_episodes_resampled.csv - data/interim/ { pid } /phone_battery_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_battery_features/phone_battery_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_battery.csv Parameters description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BATTERY features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Feature Units Description countdischarge episodes Number of discharging episodes. sumdurationdischarge minutes The total duration of all discharging episodes. countcharge episodes Number of battery charging episodes. sumdurationcharge minutes The total duration of all charging episodes. avgconsumptionrate episodes/minutes The average of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration maxconsumptionrate episodes/minutes The highest of all episodes\u2019 consumption rates. 
An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration Assumptions/Observations We convert battery data collected with iOS client v1 (autodetected because battery status 4 do not exist) to match Android battery format: we swap status 3 for 5 and 1 for 3 We group battery data into discharge or charge episodes considering any contiguous rows with consecutive reductions or increases of the battery level if they are logged within [EPISODE_THRESHOLD_BETWEEN_ROWS] minutes from each other.","title":"Phone Battery"},{"location":"features/phone-battery/#phone-battery","text":"Sensor parameters description for [PHONE_BATTERY] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the battery data is stored [EPISODE_THRESHOLD_BETWEEN_ROWS] Difference in minutes between any two rows for them to be considered part of the same battery charge or discharge episode","title":"Phone Battery"},{"location":"features/phone-battery/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_battery_raw.csv - data/interim/ { pid } /phone_battery_episodes.csv - data/interim/ { pid } /phone_battery_episodes_resampled.csv - data/interim/ { pid } /phone_battery_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_battery_features/phone_battery_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_battery.csv Parameters description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BATTERY features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BATTERY][PROVIDERS][RAPIDS] : Feature Units Description countdischarge episodes Number of discharging episodes. sumdurationdischarge minutes The total duration of all discharging episodes. 
countcharge episodes Number of battery charging episodes. sumdurationcharge minutes The total duration of all charging episodes. avgconsumptionrate episodes/minutes The average of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration maxconsumptionrate episodes/minutes The highest of all episodes\u2019 consumption rates. An episode\u2019s consumption rate is defined as the ratio between its battery delta and duration Assumptions/Observations We convert battery data collected with iOS client v1 (autodetected because battery status 4 do not exist) to match Android battery format: we swap status 3 for 5 and 1 for 3 We group battery data into discharge or charge episodes considering any contiguous rows with consecutive reductions or increases of the battery level if they are logged within [EPISODE_THRESHOLD_BETWEEN_ROWS] minutes from each other.","title":"RAPIDS provider"},{"location":"features/phone-bluetooth/","text":"Phone Bluetooth \u00b6 Sensor parameters description for [PHONE_BLUETOOTH] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the bluetooth data is stored RAPIDS provider \u00b6 Warning The features of this provider are deprecated in favor of DORYAB provider (see below). 
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned devices during a time segment, a device can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique devices during a time segment as identified by their hardware ( bt_address ) address countscansmostuniquedevice scans Number of scans of the most sensed device within each time segment instance Assumptions/Observations From v0.2.0 countscans , uniquedevices , countscansmostuniquedevice were deprecated because they overlap with the respective features for ALL devices of the PHONE_BLUETOOTH DORYAB provider DORYAB provider \u00b6 This provider is adapted from the work by Doryab et al . Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the DORYAB provider [FEATURES] Features to be computed, see table below. 
These features are computed for three device categories: all devices, own devices and other devices. Features description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Feature Units Description countscans scans Number of scans (rows) from the devices sensed during a time segment instance. The more scans a bluetooth device has the longer it remained within range of the participant\u2019s phone uniquedevices devices Number of unique bluetooth devices sensed during a time segment instance as identified by their hardware addresses ( bt_address ) meanscans scans Mean of the scans of every sensed device within each time segment instance stdscans scans Standard deviation of the scans of every sensed device within each time segment instance countscans most frequentdevice within segments scans Number of scans of the most sensed device within each time segment instance countscans least frequentdevice within segments scans Number of scans of the least sensed device within each time segment instance countscans most frequentdevice across segments scans Number of scans of the most sensed device across time segment instances of the same type countscans least frequentdevice across segments scans Number of scans of the least sensed device across time segment instances of the same type per device countscans most frequentdevice acrossdataset scans Number of scans of the most sensed device across the entire dataset of every participant countscans least frequentdevice acrossdataset scans Number of scans of the least sensed device across the entire dataset of every participant Assumptions/Observations Devices are classified as belonging to the participant ( own ) or to other people ( others ) using k-means based on the number of times and the number of days each device was detected across each participant\u2019s dataset. See Doryab et al for more details. If ownership cannot be computed because all devices were detected on only one day, they are all considered as other . 
Thus all and other features will be equal. The likelihood of this scenario decreases the more days of data you have. The most and least frequent devices will be the same across time segment instances and across the entire dataset when every time segment instance covers every hour of a dataset. For example, daily segments (00:00 to 23:59) fall in this category but morning segments (06:00am to 11:59am) or periodic 30-minute segments don\u2019t. Example Simplified raw bluetooth data The following is a simplified example with bluetooth data from three days and two time segments: morning and afternoon. There are two own devices: 5C836F5-487E-405F-8E28-21DBD40FA4FF detected seven times across two days and 499A1EAF-DDF1-4657-986C-EA5032104448 detected eight times on a single day. local_date segment bt_address own_device 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-29 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 afternoon 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-30 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2017-05-07 morning 5C5A9C41-2F68-4CEB-96D0-77DE3729B729 0 2017-05-07 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2017-05-07 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 morning 6C444841-FE64-4375-BC3F-FA410CDC0AC7 0 2017-05-07 morning 4DC7A22D-9F1F-4DEF-8576-086910AABCB5 0 2017-05-07 afternoon 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 
2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 The most and least frequent OTHER devices ( own_device == 0 ) during morning segments The most and least frequent ALL | OWN | OTHER devices are computed within each time segment instance, across time segment instances of the same type and across the entire dataset of each person. These are the most and least frequent devices for OTHER devices during morning segments. most frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) least frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) most frequent device across 2016-11-30 morning: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent device across 2016-11-30 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) most frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) least frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) most frequent across morning segments: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent across morning segments: '6C444841-FE64-4375-BC3F-FA410CDC0AC7' (when tied, the first occurance is chosen) most frequent across dataset: '499A1EAF-DDF1-4657-986C-EA5032104448' (only taking into account \"morning\" segments) least frequent across dataset: '4DC7A22D-9F1F-4DEF-8576-086910AABCB5' (when tied, the first occurance is chosen) Bluetooth features for OTHER 
devices and morning segments For brevity we only show the following features for morning segments: OTHER : DEVICES : [ \"countscans\" , \"uniquedevices\" , \"meanscans\" , \"stdscans\" ] SCANS_MOST_FREQUENT_DEVICE : [ \"withinsegments\" , \"acrosssegments\" , \"acrossdataset\" ] Note that countscansmostfrequentdeviceacrossdatasetothers is all 0 s because 499A1EAF-DDF1-4657-986C-EA5032104448 is excluded from the count as is labelled as an own device (not other ). local_segment countscansothers uniquedevicesothers meanscansothers stdscansothers countscansmostfrequentdevicewithinsegmentsothers countscansmostfrequentdeviceacrosssegmentsothers countscansmostfrequentdeviceacrossdatasetothers 2016-11-29-morning 1 1 1.000000 NaN 1 0.0 0.0 2016-11-30-morning 4 3 1.333333 0.57735 2 2.0 2.0 2017-05-07-morning 5 5 1.000000 0.00000 1 1.0 1.0","title":"Phone Bluetooth"},{"location":"features/phone-bluetooth/#phone-bluetooth","text":"Sensor parameters description for [PHONE_BLUETOOTH] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the bluetooth data is stored","title":"Phone Bluetooth"},{"location":"features/phone-bluetooth/#rapids-provider","text":"Warning The features of this provider are deprecated in favor of DORYAB provider (see below). 
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_BLUETOOTH][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned devices during a time segment, a device can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique devices during a time segment as identified by their hardware ( bt_address ) address countscansmostuniquedevice scans Number of scans of the most sensed device within each time segment instance Assumptions/Observations From v0.2.0 countscans , uniquedevices , countscansmostuniquedevice were deprecated because they overlap with the respective features for ALL devices of the PHONE_BLUETOOTH DORYAB provider","title":"RAPIDS provider"},{"location":"features/phone-bluetooth/#doryab-provider","text":"This provider is adapted from the work by Doryab et al . 
Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_bluetooth_raw.csv - data/raw/ { pid } /phone_bluetooth_with_datetime.csv - data/interim/ { pid } /phone_bluetooth_features/phone_bluetooth_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_bluetooth.csv \" Parameters description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_BLUETOOTH features from the DORYAB provider [FEATURES] Features to be computed, see table below. These features are computed for three device categories: all devices, own devices and other devices. Features description for [PHONE_BLUETOOTH][PROVIDERS][DORYAB] : Feature Units Description countscans scans Number of scans (rows) from the devices sensed during a time segment instance. The more scans a bluetooth device has the longer it remained within range of the participant\u2019s phone uniquedevices devices Number of unique bluetooth devices sensed during a time segment instance as identified by their hardware addresses ( bt_address ) meanscans scans Mean of the scans of every sensed device within each time segment instance stdscans scans Standard deviation of the scans of every sensed device within each time segment instance countscans most frequentdevice within segments scans Number of scans of the most sensed device within each time segment instance countscans least frequentdevice within segments scans Number of scans of the least sensed device within each time segment instance countscans most frequentdevice across segments scans Number of scans of the most sensed device across time segment instances of the same type countscans least frequentdevice across segments scans Number of scans of the least sensed device across time segment instances of the same type per device countscans most frequentdevice acrossdataset scans Number of scans of the most sensed device across the entire 
dataset of every participant countscans least frequentdevice acrossdataset scans Number of scans of the least sensed device across the entire dataset of every participant Assumptions/Observations Devices are classified as belonging to the participant ( own ) or to other people ( others ) using k-means based on the number of times and the number of days each device was detected across each participant\u2019s dataset. See Doryab et al for more details. If ownership cannot be computed because all devices were detected on only one day, they are all considered as other . Thus all and other features will be equal. The likelihood of this scenario decreases the more days of data you have. The most and least frequent devices will be the same across time segment instances and across the entire dataset when every time segment instance covers every hour of a dataset. For example, daily segments (00:00 to 23:59) fall in this category but morning segments (06:00am to 11:59am) or periodic 30-minute segments don\u2019t. Example Simplified raw bluetooth data The following is a simplified example with bluetooth data from three days and two time segments: morning and afternoon. There are two own devices: 5C836F5-487E-405F-8E28-21DBD40FA4FF detected seven times across two days and 499A1EAF-DDF1-4657-986C-EA5032104448 detected eight times on a single day. 
local_date segment bt_address own_device 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-29 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-29 afternoon 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2016-11-30 morning 48872A52-68DE-420D-98DA-73339A1C4685 0 2016-11-30 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2016-11-30 afternoon 55C836F5-487E-405F-8E28-21DBD40FA4FF 1 2017-05-07 morning 5C5A9C41-2F68-4CEB-96D0-77DE3729B729 0 2017-05-07 morning 25262DC7-780C-4AD5-AD3A-D9776AEF7FC1 0 2017-05-07 morning 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 morning 6C444841-FE64-4375-BC3F-FA410CDC0AC7 0 2017-05-07 morning 4DC7A22D-9F1F-4DEF-8576-086910AABCB5 0 2017-05-07 afternoon 5B1E6981-2E50-4D9A-99D8-67AED430C5A8 0 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 2017-05-07 afternoon 499A1EAF-DDF1-4657-986C-EA5032104448 1 The most and least frequent OTHER devices ( own_device == 0 ) during morning segments The most and least frequent ALL | OWN | OTHER devices are computed within each time segment instance, across time segment instances of the same type and across the entire dataset of each person. 
These are the most and least frequent devices for OTHER devices during morning segments. most frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) least frequent device across 2016-11-29 morning: '48872A52-68DE-420D-98DA-73339A1C4685' (this device is the only one in this instance) most frequent device across 2016-11-30 morning: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent device across 2016-11-30 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) most frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) least frequent device across 2017-05-07 morning: '25262DC7-780C-4AD5-AD3A-D9776AEF7FC1' (when tied, the first occurance is chosen) most frequent across morning segments: '5B1E6981-2E50-4D9A-99D8-67AED430C5A8' least frequent across morning segments: '6C444841-FE64-4375-BC3F-FA410CDC0AC7' (when tied, the first occurance is chosen) most frequent across dataset: '499A1EAF-DDF1-4657-986C-EA5032104448' (only taking into account \"morning\" segments) least frequent across dataset: '4DC7A22D-9F1F-4DEF-8576-086910AABCB5' (when tied, the first occurance is chosen) Bluetooth features for OTHER devices and morning segments For brevity we only show the following features for morning segments: OTHER : DEVICES : [ \"countscans\" , \"uniquedevices\" , \"meanscans\" , \"stdscans\" ] SCANS_MOST_FREQUENT_DEVICE : [ \"withinsegments\" , \"acrosssegments\" , \"acrossdataset\" ] Note that countscansmostfrequentdeviceacrossdatasetothers is all 0 s because 499A1EAF-DDF1-4657-986C-EA5032104448 is excluded from the count as is labelled as an own device (not other ). 
local_segment countscansothers uniquedevicesothers meanscansothers stdscansothers countscansmostfrequentdevicewithinsegmentsothers countscansmostfrequentdeviceacrosssegmentsothers countscansmostfrequentdeviceacrossdatasetothers 2016-11-29-morning 1 1 1.000000 NaN 1 0.0 0.0 2016-11-30-morning 4 3 1.333333 0.57735 2 2.0 2.0 2017-05-07-morning 5 5 1.000000 0.00000 1 1.0 1.0","title":"DORYAB provider"},{"location":"features/phone-calls/","text":"Phone Calls \u00b6 Sensor parameters description for [PHONE_CALLS] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the calls data is stored RAPIDS Provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_calls_raw.csv - data/raw/ { pid } /phone_calls_with_datetime.csv - data/interim/ { pid } /phone_calls_features/phone_calls_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_calls.csv Parameters description for [PHONE_CALLS][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CALLS features from the RAPIDS provider [CALL_TYPES] The particular call_type that will be analyzed. The options for this parameter are incoming, outgoing or missed. [FEATURES] Features to be computed for outgoing , incoming , and missed calls. Note that the same features are available for both incoming and outgoing calls, while missed calls has its own set of features. See the tables below. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] incoming and outgoing calls: Feature Units Description count calls Number of calls of a particular call_type occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular call_type for a particular time_segment meanduration seconds The mean duration of all calls of a particular call_type during a particular time_segment . 
sumduration seconds The sum of the duration of all calls of a particular call_type during a particular time_segment . minduration seconds The duration of the shortest call of a particular call_type during a particular time_segment . maxduration seconds The duration of the longest call of a particular call_type during a particular time_segment . stdduration seconds The standard deviation of the duration of all the calls of a particular call_type during a particular time_segment . modeduration seconds The mode of the duration of all the calls of a particular call_type during a particular time_segment . entropyduration nats The estimate of the Shannon entropy for the the duration of all the calls of a particular call_type during a particular time_segment . timefirstcall minutes The time in minutes between 12:00am (midnight) and the first call of call_type . timelastcall minutes The time in minutes between 12:00am (midnight) and the last call of call_type . countmostfrequentcontact calls The number of calls of a particular call_type during a particular time_segment of the most frequent contact throughout the monitored period. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] missed calls: Feature Units Description count calls Number of missed calls that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with missed calls for a particular time_segment timefirstcall minutes The time in hours from 12:00am (Midnight) that the first missed call occurred. timelastcall minutes The time in hours from 12:00am (Midnight) that the last missed call occurred. countmostfrequentcontact calls The number of missed calls during a particular time_segment of the most frequent contact throughout the monitored period. 
Assumptions/Observations Traces for iOS calls are unique even for the same contact calling a participant more than once which renders countmostfrequentcontact meaningless and distinctcontacts equal to the total number of traces. [CALL_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [CALL_TYPES] outgoing matches the [FEATURES] key outgoing iOS calls data is transformed to match Android calls data format. See our algorithm","title":"Phone Calls"},{"location":"features/phone-calls/#phone-calls","text":"Sensor parameters description for [PHONE_CALLS] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the calls data is stored","title":"Phone Calls"},{"location":"features/phone-calls/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_calls_raw.csv - data/raw/ { pid } /phone_calls_with_datetime.csv - data/interim/ { pid } /phone_calls_features/phone_calls_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_calls.csv Parameters description for [PHONE_CALLS][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CALLS features from the RAPIDS provider [CALL_TYPES] The particular call_type that will be analyzed. The options for this parameter are incoming, outgoing or missed. [FEATURES] Features to be computed for outgoing , incoming , and missed calls. Note that the same features are available for both incoming and outgoing calls, while missed calls has its own set of features. See the tables below. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] incoming and outgoing calls: Feature Units Description count calls Number of calls of a particular call_type occurred during a particular time_segment . 
distinctcontacts contacts Number of distinct contacts that are associated with a particular call_type for a particular time_segment meanduration seconds The mean duration of all calls of a particular call_type during a particular time_segment . sumduration seconds The sum of the duration of all calls of a particular call_type during a particular time_segment . minduration seconds The duration of the shortest call of a particular call_type during a particular time_segment . maxduration seconds The duration of the longest call of a particular call_type during a particular time_segment . stdduration seconds The standard deviation of the duration of all the calls of a particular call_type during a particular time_segment . modeduration seconds The mode of the duration of all the calls of a particular call_type during a particular time_segment . entropyduration nats The estimate of the Shannon entropy for the the duration of all the calls of a particular call_type during a particular time_segment . timefirstcall minutes The time in minutes between 12:00am (midnight) and the first call of call_type . timelastcall minutes The time in minutes between 12:00am (midnight) and the last call of call_type . countmostfrequentcontact calls The number of calls of a particular call_type during a particular time_segment of the most frequent contact throughout the monitored period. Features description for [PHONE_CALLS][PROVIDERS][RAPIDS] missed calls: Feature Units Description count calls Number of missed calls that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with missed calls for a particular time_segment timefirstcall minutes The time in hours from 12:00am (Midnight) that the first missed call occurred. timelastcall minutes The time in hours from 12:00am (Midnight) that the last missed call occurred. 
countmostfrequentcontact calls The number of missed calls during a particular time_segment of the most frequent contact throughout the monitored period. Assumptions/Observations Traces for iOS calls are unique even for the same contact calling a participant more than once which renders countmostfrequentcontact meaningless and distinctcontacts equal to the total number of traces. [CALL_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [CALL_TYPES] outgoing matches the [FEATURES] key outgoing iOS calls data is transformed to match Android calls data format. See our algorithm","title":"RAPIDS Provider"},{"location":"features/phone-conversation/","text":"Phone Conversation \u00b6 Sensor parameters description for [PHONE_CONVERSATION] : Key Description [CONTAINER][ANDROID] Data stream container (database table, CSV file, etc.) where the conversation data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [CONTAINER][IOS] Data stream container (database table, CSV file, etc.) 
where the conversation data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS) RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_conversation_raw.csv - data/raw/ { pid } /phone_conversation_with_datetime.csv - data/interim/ { pid } /phone_conversation_features/phone_conversation_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_conversation.csv Parameters description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CONVERSATION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [RECORDING_MINUTES] Minutes the plugin was recording audio (default 1 min) [PAUSED_MINUTES] Minutes the plugin was NOT recording audio (default 3 min) Features description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Feature Units Description minutessilence minutes Minutes labeled as silence minutesnoise minutes Minutes labeled as noise minutesvoice minutes Minutes labeled as voice minutesunknown minutes Minutes labeled as unknown sumconversationduration minutes Total duration of all conversations maxconversationduration minutes Longest duration of all conversations minconversationduration minutes Shortest duration of all conversations avgconversationduration minutes Average duration of all conversations sdconversationduration minutes Standard Deviation of the duration of all conversations timefirstconversation minutes Minutes since midnight when the first conversation for a time segment was detected timelastconversation minutes Minutes since midnight when the last conversation for a time segment was detected noisesumenergy L2-norm Sum of all energy values when inference is noise noiseavgenergy L2-norm Average of all energy values when inference is noise noisesdenergy L2-norm Standard Deviation of all energy values 
when inference is noise noiseminenergy L2-norm Minimum of all energy values when inference is noise noisemaxenergy L2-norm Maximum of all energy values when inference is noise voicesumenergy L2-norm Sum of all energy values when inference is voice voiceavgenergy L2-norm Average of all energy values when inference is voice voicesdenergy L2-norm Standard Deviation of all energy values when inference is voice voiceminenergy L2-norm Minimum of all energy values when inference is voice voicemaxenergy L2-norm Maximum of all energy values when inference is voice silencesensedfraction - Ratio between minutessilence and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) noisesensedfraction - Ratio between minutesnoise and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) voicesensedfraction - Ratio between minutesvoice and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) unknownsensedfraction - Ratio between minutesunknown and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) silenceexpectedfraction - Ration between minutessilence and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes) noiseexpectedfraction - Ration between minutesnoise and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes) voiceexpectedfraction - Ration between minutesvoice and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes) unknownexpectedfraction - Ration between minutesunknown and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes) Assumptions/Observations The timestamp of conversation rows in iOS is in seconds so we 
convert it to milliseconds to match Android\u2019s format","title":"Phone Conversation"},{"location":"features/phone-conversation/#phone-conversation","text":"Sensor parameters description for [PHONE_CONVERSATION] : Key Description [CONTAINER][ANDROID] Data stream container (database table, CSV file, etc.) where the conversation data from Android devices is stored (the AWARE client saves this data on different tables for Android and iOS) [CONTAINER][IOS] Data stream container (database table, CSV file, etc.) where the conversation data from iOS devices is stored (the AWARE client saves this data on different tables for Android and iOS)","title":"Phone Conversation"},{"location":"features/phone-conversation/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_conversation_raw.csv - data/raw/ { pid } /phone_conversation_with_datetime.csv - data/interim/ { pid } /phone_conversation_features/phone_conversation_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_conversation.csv Parameters description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_CONVERSATION features from the RAPIDS provider [FEATURES] Features to be computed, see table below [RECORDING_MINUTES] Minutes the plugin was recording audio (default 1 min) [PAUSED_MINUTES] Minutes the plugin was NOT recording audio (default 3 min) Features description for [PHONE_CONVERSATION][PROVIDERS][RAPIDS] : Feature Units Description minutessilence minutes Minutes labeled as silence minutesnoise minutes Minutes labeled as noise minutesvoice minutes Minutes labeled as voice minutesunknown minutes Minutes labeled as unknown sumconversationduration minutes Total duration of all conversations maxconversationduration minutes Longest duration of all conversations minconversationduration minutes Shortest duration of all conversations 
avgconversationduration minutes Average duration of all conversations sdconversationduration minutes Standard Deviation of the duration of all conversations timefirstconversation minutes Minutes since midnight when the first conversation for a time segment was detected timelastconversation minutes Minutes since midnight when the last conversation for a time segment was detected noisesumenergy L2-norm Sum of all energy values when inference is noise noiseavgenergy L2-norm Average of all energy values when inference is noise noisesdenergy L2-norm Standard Deviation of all energy values when inference is noise noiseminenergy L2-norm Minimum of all energy values when inference is noise noisemaxenergy L2-norm Maximum of all energy values when inference is noise voicesumenergy L2-norm Sum of all energy values when inference is voice voiceavgenergy L2-norm Average of all energy values when inference is voice voicesdenergy L2-norm Standard Deviation of all energy values when inference is voice voiceminenergy L2-norm Minimum of all energy values when inference is voice voicemaxenergy L2-norm Maximum of all energy values when inference is voice silencesensedfraction - Ratio between minutessilence and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) noisesensedfraction - Ratio between minutesnoise and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) voicesensedfraction - Ratio between minutesvoice and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) unknownsensedfraction - Ratio between minutesunknown and the sum of (minutessilence, minutesnoise, minutesvoice, minutesunknown) silenceexpectedfraction - Ration between minutessilence and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes) noiseexpectedfraction - Ration between minutesnoise and the number of minutes that in theory should have been sensed based on 
the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes) voiceexpectedfraction - Ration between minutesvoice and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes) unknownexpectedfraction - Ration between minutesunknown and the number of minutes that in theory should have been sensed based on the record and pause cycle of the plugin (1440 / recordingMinutes+pausedMinutes) Assumptions/Observations The timestamp of conversation rows in iOS is in seconds so we convert it to milliseconds to match Android\u2019s format","title":"RAPIDS provider"},{"location":"features/phone-data-yield/","text":"Phone Data Yield \u00b6 This is a combinatorial sensor which means that we use the data from multiple sensors to extract data yield features. Data yield features can be used to remove rows ( time segments ) that do not contain enough data. You should decide what is your \u201cenough\u201d threshold depending on the type of sensors you collected (frequency vs event based, e.g. acceleroemter vs calls), the length of your study, and the rates of missing data that your analysis could handle. Why is data yield important? Imagine that you want to extract PHONE_CALL features on daily segments ( 00:00 to 23:59 ). Let\u2019s say that on day 1 the phone logged 10 calls and 23 hours of data from other sensors and on day 2 the phone logged 10 calls and only 2 hours of data from other sensors. It\u2019s more likely that other calls were placed on the 22 hours of data that you didn\u2019t log on day 2 than on the 1 hour of data you didn\u2019t log on day 1, and so including day 2 in your analysis could bias your results. Sensor parameters description for [PHONE_DATA_YIELD] : Key Description [SENSORS] One or more phone sensor config keys (e.g. PHONE_MESSAGE ). The more keys you include the more accurately RAPIDS can approximate the time an smartphone was sensing data. 
The supported phone sensors you can include in this list are outlined below ( do NOT include Fitbit sensors, ONLY include phone sensors ). Supported phone sensors for [PHONE_DATA_YIELD][SENSORS] PHONE_ACCELEROMETER PHONE_ACTIVITY_RECOGNITION PHONE_APPLICATIONS_CRASHES PHONE_APPLICATIONS_FOREGROUND PHONE_APPLICATIONS_NOTIFICATIONS PHONE_BATTERY PHONE_BLUETOOTH PHONE_CALLS PHONE_CONVERSATION PHONE_KEYBOARD PHONE_LIGHT PHONE_LOCATIONS PHONE_LOG PHONE_MESSAGES PHONE_SCREEN PHONE_WIFI_CONNECTED PHONE_WIFI_VISIBLE RAPIDS provider \u00b6 Before explaining the data yield features, let\u2019s define the following relevant concepts: A valid minute is any 60 second window when any phone sensor logged at least 1 row of data A valid hour is any 60 minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The timestamps of all sensors are concatenated and then grouped per time segment. Minute and hour windows are created from the beginning of each time segment instance and these windows are marked as valid based on the definitions above. The duration of each time segment is taken into account to compute the features described below. 
Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } / { sensor } _raw.csv # one for every [PHONE_DATA_YIELD][SENSORS] - data/interim/ { pid } /phone_yielded_timestamps.csv - data/interim/ { pid } /phone_yielded_timestamps_with_datetime.csv - data/interim/ { pid } /phone_data_yield_features/phone_data_yield_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_data_yield.csv Parameters description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_DATA_YIELD features from the RAPIDS provider [FEATURES] Features to be computed, see table below [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The proportion [0.0 ,1.0] of valid minutes in a 60-minute window necessary to flag that window as valid. Features description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Feature Units Description ratiovalidyieldedminutes - The ratio between the number of valid minutes and the duration in minutes of a time segment. ratiovalidyieldedhours - The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour this feature will always be 1. Assumptions/Observations We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together. For example, let\u2019s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur: the 12 missing hours are from the beginning of the segment or 30 minutes could be missing from every hour (24 * 30 minutes = 12 hours). ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). 
However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b . In other words, sensed data for b is more evenly spread compared to a .","title":"Phone Data Yield"},{"location":"features/phone-data-yield/#phone-data-yield","text":"This is a combinatorial sensor which means that we use the data from multiple sensors to extract data yield features. Data yield features can be used to remove rows ( time segments ) that do not contain enough data. You should decide what is your \u201cenough\u201d threshold depending on the type of sensors you collected (frequency vs event based, e.g. acceleroemter vs calls), the length of your study, and the rates of missing data that your analysis could handle. Why is data yield important? Imagine that you want to extract PHONE_CALL features on daily segments ( 00:00 to 23:59 ). Let\u2019s say that on day 1 the phone logged 10 calls and 23 hours of data from other sensors and on day 2 the phone logged 10 calls and only 2 hours of data from other sensors. It\u2019s more likely that other calls were placed on the 22 hours of data that you didn\u2019t log on day 2 than on the 1 hour of data you didn\u2019t log on day 1, and so including day 2 in your analysis could bias your results. Sensor parameters description for [PHONE_DATA_YIELD] : Key Description [SENSORS] One or more phone sensor config keys (e.g. PHONE_MESSAGE ). The more keys you include the more accurately RAPIDS can approximate the time an smartphone was sensing data. The supported phone sensors you can include in this list are outlined below ( do NOT include Fitbit sensors, ONLY include phone sensors ). 
Supported phone sensors for [PHONE_DATA_YIELD][SENSORS] PHONE_ACCELEROMETER PHONE_ACTIVITY_RECOGNITION PHONE_APPLICATIONS_CRASHES PHONE_APPLICATIONS_FOREGROUND PHONE_APPLICATIONS_NOTIFICATIONS PHONE_BATTERY PHONE_BLUETOOTH PHONE_CALLS PHONE_CONVERSATION PHONE_KEYBOARD PHONE_LIGHT PHONE_LOCATIONS PHONE_LOG PHONE_MESSAGES PHONE_SCREEN PHONE_WIFI_CONNECTED PHONE_WIFI_VISIBLE","title":"Phone Data Yield"},{"location":"features/phone-data-yield/#rapids-provider","text":"Before explaining the data yield features, let\u2019s define the following relevant concepts: A valid minute is any 60 second window when any phone sensor logged at least 1 row of data A valid hour is any 60 minute window with at least X valid minutes. The X or threshold is given by [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The timestamps of all sensors are concatenated and then grouped per time segment. Minute and hour windows are created from the beginning of each time segment instance and these windows are marked as valid based on the definitions above. The duration of each time segment is taken into account to compute the features described below. Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } / { sensor } _raw.csv # one for every [PHONE_DATA_YIELD][SENSORS] - data/interim/ { pid } /phone_yielded_timestamps.csv - data/interim/ { pid } /phone_yielded_timestamps_with_datetime.csv - data/interim/ { pid } /phone_data_yield_features/phone_data_yield_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_data_yield.csv Parameters description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_DATA_YIELD features from the RAPIDS provider [FEATURES] Features to be computed, see table below [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] The proportion [0.0 ,1.0] of valid minutes in a 60-minute window necessary to flag that window as valid. 
Features description for [PHONE_DATA_YIELD][PROVIDERS][RAPIDS] : Feature Units Description ratiovalidyieldedminutes - The ratio between the number of valid minutes and the duration in minutes of a time segment. ratiovalidyieldedhours - The ratio between the number of valid hours and the duration in hours of a time segment. If the time segment is shorter than 1 hour this feature will always be 1. Assumptions/Observations We recommend using ratiovalidyieldedminutes on time segments that are shorter than two or three hours and ratiovalidyieldedhours for longer segments. This is because relying on yielded minutes only can be misleading when a big chunk of those missing minutes are clustered together. For example, let\u2019s assume we are working with a 24-hour time segment that is missing 12 hours of data. Two extreme cases can occur: the 12 missing hours are from the beginning of the segment or 30 minutes could be missing from every hour (24 * 30 minutes = 12 hours). ratiovalidyieldedminutes would be 0.5 for both a and b (hinting the missing circumstances are similar). However, ratiovalidyieldedhours would be 0.5 for a and 1.0 for b if [MINUTE_RATIO_THRESHOLD_FOR_VALID_YIELDED_HOURS] is between [0.0 and 0.49] (hinting that the missing circumstances might be more favorable for b . In other words, sensed data for b is more evenly spread compared to a .","title":"RAPIDS provider"},{"location":"features/phone-keyboard/","text":"Phone Keyboard \u00b6 Sensor parameters description for [PHONE_KEYBOARD] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the keyboard data is stored File Sequence - data/raw/ { pid } /phone_keyboard_raw.csv - data/raw/ { pid } /phone_keyboard_with_datetime.csv - data/interim/ { pid } /phone_keyboard_features/phone_keyboard_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_keyboard.csv Features description for [PHONE_KEYBOARD] : Feature Units Description sessioncount - Number of typing sessions in a time segment. A session begins with any keypress and finishes until 5 seconds have elapsed since the last key was pressed or the application that the user was typing on changes. averagesessionlength milliseconds Average length of all sessions in a time segment instance averageinterkeydelay milliseconds The average time between keystrokes measured in milliseconds. changeintextlengthlessthanminusone Number of times a keyboard typing or swiping event changed the length of the current text to less than one fewer character. changeintextlengthequaltominusone Number of times a keyboard typing or swiping event changed the length of the current text in exactly one fewer character. changeintextlengthequaltoone Number of times a keyboard typing or swiping event changed the length of the current text in exactly one more character. changeintextlengthmorethanone Number of times a keyboard typing or swiping event changed the length of the current text to more than one character. maxtextlength Length in characters of the longest sentence(s) contained in the typing text box of any app during the time segment. lastmessagelength Length of the last text in characters of the sentence(s) contained in the typing text box of any app during the time segment. totalkeyboardtouches Average number of typing events across all sessions in a time segment instance. 
Note We did not find a reliable way to distinguish between AutoCorrect or AutoComplete changes, since both can be applied with a single touch or swipe event and can decrease or increase the length of the text by an arbitrary number of characters.","title":"Phone Keyboard"},{"location":"features/phone-keyboard/#phone-keyboard","text":"Sensor parameters description for [PHONE_KEYBOARD] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the keyboard data is stored File Sequence - data/raw/ { pid } /phone_keyboard_raw.csv - data/raw/ { pid } /phone_keyboard_with_datetime.csv - data/interim/ { pid } /phone_keyboard_features/phone_keyboard_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_keyboard.csv Features description for [PHONE_KEYBOARD] : Feature Units Description sessioncount - Number of typing sessions in a time segment. A session begins with any keypress and finishes until 5 seconds have elapsed since the last key was pressed or the application that the user was typing on changes. averagesessionlength milliseconds Average length of all sessions in a time segment instance averageinterkeydelay milliseconds The average time between keystrokes measured in milliseconds. changeintextlengthlessthanminusone Number of times a keyboard typing or swiping event changed the length of the current text to less than one fewer character. changeintextlengthequaltominusone Number of times a keyboard typing or swiping event changed the length of the current text in exactly one fewer character. changeintextlengthequaltoone Number of times a keyboard typing or swiping event changed the length of the current text in exactly one more character. changeintextlengthmorethanone Number of times a keyboard typing or swiping event changed the length of the current text to more than one character. 
maxtextlength Length in characters of the longest sentence(s) contained in the typing text box of any app during the time segment. lastmessagelength Length of the last text in characters of the sentence(s) contained in the typing text box of any app during the time segment. totalkeyboardtouches Average number of typing events across all sessions in a time segment instance. Note We did not find a reliable way to distinguish between AutoCorrect or AutoComplete changes, since both can be applied with a single touch or swipe event and can decrease or increase the length of the text by an arbitrary number of characters.","title":"Phone Keyboard"},{"location":"features/phone-light/","text":"Phone Light \u00b6 Sensor parameters description for [PHONE_LIGHT] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the light data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_light_raw.csv - data/raw/ { pid } /phone_light_with_datetime.csv - data/interim/ { pid } /phone_light_features/phone_light_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_light.csv Parameters description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_LIGHT features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Feature Units Description count rows Number light sensor rows recorded. maxlux lux The maximum ambient luminance. minlux lux The minimum ambient luminance. avglux lux The average ambient luminance. medianlux lux The median ambient luminance. stdlux lux The standard deviation of ambient luminance. 
Assumptions/Observations NA","title":"Phone Light"},{"location":"features/phone-light/#phone-light","text":"Sensor parameters description for [PHONE_LIGHT] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the light data is stored","title":"Phone Light"},{"location":"features/phone-light/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_light_raw.csv - data/raw/ { pid } /phone_light_with_datetime.csv - data/interim/ { pid } /phone_light_features/phone_light_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_light.csv Parameters description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_LIGHT features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_LIGHT][PROVIDERS][RAPIDS] : Feature Units Description count rows Number light sensor rows recorded. maxlux lux The maximum ambient luminance. minlux lux The minimum ambient luminance. avglux lux The average ambient luminance. medianlux lux The median ambient luminance. stdlux lux The standard deviation of ambient luminance. Assumptions/Observations NA","title":"RAPIDS provider"},{"location":"features/phone-locations/","text":"Phone Locations \u00b6 Sensor parameters description for [PHONE_LOCATIONS] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the location data is stored [LOCATIONS_TO_USE] Type of location data to use, one of ALL , GPS , ALL_RESAMPLED or FUSED_RESAMPLED . This filter is based on the provider column of the locations table, ALL includes every row, GPS only includes rows where the provider is gps, ALL_RESAMPLED includes all rows after being resampled, and FUSED_RESAMPLED only includes rows where the provider is fused after being resampled. 
[FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD] if ALL_RESAMPLED or FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row is resampled to the next valid timestamp (see the Assumptions/Observations below) only if the time difference between them is less or equal than this threshold (in minutes). [FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION] if ALL_RESAMPLED or FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row is resampled at most for this long (in minutes) Assumptions/Observations Types of location data to use Android and iOS clients can collect location coordinates through the phone\u2019s GPS, the network cellular towers around the phone, or Google\u2019s fused location API. If you want to use only the GPS provider, set [LOCATIONS_TO_USE] to GPS If you want to use all providers, set [LOCATIONS_TO_USE] to ALL If you collected location data from different providers, including the fused API, use ALL_RESAMPLED If your mobile client was configured to use fused location only or want to focus only on this provider, set [LOCATIONS_TO_USE] to RESAMPLE_FUSED . ALL_RESAMPLED and RESAMPLE_FUSED take the original location coordinates and replicate each pair forward in time as long as the phone was sensing data as indicated by the joined timestamps of [PHONE_DATA_YIELD][SENSORS] . This is done because Google\u2019s API only logs a new location coordinate pair when it is sufficiently different in time or space from the previous one and because GPS and network providers can log data at variable rates. There are two parameters associated with resampling fused location. FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD (in minutes, default 30) controls the maximum gap between any two coordinate pairs to replicate the last known pair. 
For example, participant A\u2019s phone did not collect data between 10.30 am and 10:50 am and between 11:05am and 11:40am, the last known coordinate pair is replicated during the first period but not the second. In other words, we assume that we cannot longer guarantee the participant stayed at the last known location if the phone did not sense data for more than 30 minutes. FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION (in minutes, default 720 or 12 hours) stops the last known fused location from being replicated longer than this threshold even if the phone was sensing data continuously. For example, participant A went home at 9 pm, and their phone was sensing data without gaps until 11 am the next morning, the last known location is replicated until 9 am. If you have suggestions to modify or improve this resampling, let us know. BARNETT provider \u00b6 These features are based on the original open-source implementation by Barnett et al and some features created by Canzian et al . Available time segments and platforms Available only for segments that start at 00:00:00 and end at 23:59:59 of the same or a different day (daily, weekly, weekend, etc.) Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the BARNETT provider [FEATURES] Features to be computed, see table below [ACCURACY_LIMIT] An integer in meters, any location rows with an accuracy higher than this is dropped. 
This number means there\u2019s a 68% probability the actual location is within this radius [IF_MULTIPLE_TIMEZONES] Currently, USE_MOST_COMMON is the only value supported. If the location data for a participant belongs to multiple time zones, we select the most common because Barnett\u2019s algorithm can only handle one time zone [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. Features description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] adapted from Beiwe Summary Statistics : Feature Units Description hometime minutes Time at home. Time spent at home in minutes. Home is the most visited significant location between 8 pm and 8 am, including any pauses within a 200-meter radius. disttravelled meters Total distance traveled over a day (flights). rog meters The Radius of Gyration (rog) is a measure in meters of the area covered by a person over a day. A centroid is calculated for all the places (pauses) visited during a day, and a weighted distance between all the places and that centroid is computed. The weights are proportional to the time spent in each place. maxdiam meters The maximum diameter is the largest distance between any two pauses. maxhomedist meters The maximum distance from home in meters. siglocsvisited locations The number of significant locations visited during the day. Significant locations are computed using k-means clustering over pauses found in the whole monitoring period. The number of clusters is found iterating k from 1 to 200 stopping until the centroids of two significant locations are within 400 meters of one another. avgflightlen meters Mean length of all flights. 
stdflightlen meters Standard deviation of the length of all flights. avgflightdur seconds Mean duration of all flights. stdflightdur seconds The standard deviation of the duration of all flights. probpause - The fraction of a day spent in a pause (as opposed to a flight) siglocentropy nats Shannon\u2019s entropy measurement is based on the proportion of time spent at each significant location visited during a day. circdnrtn - A continuous metric quantifying a person\u2019s circadian routine that can take any value between 0 and 1, where 0 represents a daily routine completely different from any other sensed days and 1 a routine the same as every other sensed day. wkenddayrtn - Same as circdnrtn but computed separately for weekends and weekdays. Assumptions/Observations Multi day segment features Barnett\u2019s features are only available on time segments that span entire days (00:00:00 to 23:59:59). Such segments can be one-day long (daily) or multi-day (weekly, for example). Multi-day segment features are computed based on daily features summarized the following way: sum for hometime , disttravelled , siglocsvisited , and minutes_data_used max for maxdiam , and maxhomedist mean for rog , avgflightlen , stdflightlen , avgflightdur , stdflightdur , probpause , siglocentropy , circdnrtn , wkenddayrtn , and minsmissing Computation speed The process to extract these features can be slow compared to other sensors and providers due to the required simulation. How are these features computed? These features are based on a Pause-Flight model. A pause is defined as a mobility trace (location pings) within a certain duration and distance (by default, 300 seconds and 60 meters). A flight is any mobility trace between two pauses. Data is resampled and imputed before the features are computed. See Barnett et al for more information. In RAPIDS, we only expose one parameter for these features (accuracy limit). 
You can change other parameters in src/features/phone_locations/barnett/library/MobilityFeatures.R . Significant Locations Significant locations are determined using K-means clustering on pauses longer than 10 minutes. The number of clusters (K) is increased until no two clusters are within 400 meters from each other. After this, pauses within a certain range of a cluster (200 meters by default) count as a visit to that significant location. This description was adapted from the Supplementary Materials of Barnett et al . The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al . DORYAB provider \u00b6 These features are based on the original implementation by Doryab et al. . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_processed_with_datetime_with_doryab_columns.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the BARNETT provider [FEATURES] Features to be computed, see table below [ACCURACY_LIMIT] An integer in meters, any location rows with an accuracy higher than this will be dropped. This number means there\u2019s a 68% probability the true location is within this radius [DBSCAN_EPS] The maximum distance in meters between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. 
[DBSCAN_MINSAMPLES] The number of samples (or total weight) in a neighborhood for a point to be considered as a core point of a cluster. This includes the point itself. [THRESHOLD_STATIC] It is the threshold value in km/hr which labels a row as Static or Moving. [MAXIMUM_ROW_GAP] The maximum gap (in seconds) allowed between any two consecutive rows for them to be considered part of the same displacement. If this threshold is too high, it can throw speed and distance calculations off for periods when the phone was not sensing. This value must be larger than your GPS sampling interval when [LOCATIONS_TO_USE] is ALL or GPS , otherwise all the stationary-related features will be NA. If [LOCATIONS_TO_USE] is ALL_RESAMPLED or FUSED_RESAMPLED , you can use the default value as every row will be resampled at 1-minute intervals. [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. [CLUSTER_ON] Set this flag to PARTICIPANT_DATASET to create clusters based on the entire participant\u2019s dataset or to TIME_SEGMENT to create clusters based on all the instances of the corresponding time segment (e.g. all mornings) or to TIME_SEGMENT_INSTANCE to create clusters based on a single instance (e.g. 2020-05-20\u2019s morning). [INFER_HOME_LOCATION_STRATEGY] The strategy applied to infer home locations. Set to DORYAB_STRATEGY to infer one home location for the entire dataset of each participant or to SUN_LI_VEGA_STRATEGY to infer one home location per day per participant. See Observations below to know more. 
[MINIMUM_DAYS_TO_DETECT_HOME_CHANGES] The minimum number of consecutive days a new home location candidate has to repeat before it is considered the participant\u2019s new home. This parameter will be used only when [INFER_HOME_LOCATION_STRATEGY] is set to SUN_LI_VEGA_STRATEGY . [CLUSTERING_ALGORITHM] The original Doryab et al. implementation uses DBSCAN , OPTICS is also available with similar (but not identical) clustering results and lower memory consumption. [RADIUS_FOR_HOME] All location coordinates within this distance (meters) from the home location coordinates are considered a homestay (see timeathome feature). Features description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Feature Units Description locationvariance \\(meters^2\\) The sum of the variances of the latitude and longitude columns. loglocationvariance - Log of the sum of the variances of the latitude and longitude columns. totaldistance meters Total distance traveled in a time segment using the haversine formula. avgspeed km/hr Average speed in a time segment considering only the instances labeled as Moving. varspeed km/hr Speed variance in a time segment considering only the instances labeled as Moving. circadianmovement - Deprecated, see Observations below. \u201cIt encodes the extent to which a person\u2019s location patterns follow a 24-hour circadian cycle.\" Doryab et al. . numberofsignificantplaces places Number of significant locations visited. It is calculated using the DBSCAN/OPTICS clustering algorithm which takes in EPS and MIN_SAMPLES as parameters to identify clusters. Each cluster is a significant place. numberlocationtransitions transitions Number of movements between any two clusters in a time segment. radiusgyration meters Quantifies the area covered by a participant timeattop1location minutes Time spent at the most significant location. timeattop2location minutes Time spent at the 2 nd most significant location. 
timeattop3location minutes Time spent at the 3 rd most significant location. movingtostaticratio - Ratio between stationary time and total location sensed time. A lat/long coordinate pair is labeled as stationary if its speed (distance/time) to the next coordinate pair is less than 1km/hr. A higher value represents a more stationary routine. outlierstimepercent - Ratio between the time spent in non-significant clusters divided by the time spent in all clusters (stationary time. Only stationary samples are clustered). A higher value represents more time spent in non-significant clusters. maxlengthstayatclusters minutes Maximum time spent in a cluster (significant location). minlengthstayatclusters minutes Minimum time spent in a cluster (significant location). avglengthstayatclusters minutes Average time spent in a cluster (significant location). stdlengthstayatclusters minutes Standard deviation of time spent in a cluster (significant location). locationentropy nats Shannon Entropy computed over the row count of each cluster (significant location), it is higher the more rows belong to a cluster (i.e., the more time a participant spent at a significant location). normalizedlocationentropy nats Shannon Entropy computed over the row count of each cluster (significant location) divided by the number of clusters; it is higher the more rows belong to a cluster (i.e., the more time a participant spent at a significant location). timeathome minutes Time spent at home (see Observations below for a description on how we compute home). homelabel - An integer that represents a different home location. It will be a constant number (1) for all participants when [INFER_HOME_LOCATION_STRATEGY] is set to DORYAB_STRATEGY or an incremental index if the strategy is set to SUN_LI_VEGA_STRATEGY . 
Assumptions/Observations Significant Locations Identified Significant locations are determined using DBSCAN clustering on locations that a patient visit over the course of the period of data collection. Circadian Movement Calculation Note Feb 3 2021. It seems the implementation of this feature is not correct; we suggest not to use this feature until a fix is in place. For a detailed description of how this should be calculated, see Saeb et al . Fine-Tuning Clustering Parameters Based on an experiment where we collected fused location data for 7 days with a mean accuracy of 86 & SD of 350.874635, we determined that EPS/MAX_EPS =100 produced closer clustering results to reality. Higher values (>100) missed out on some significant places, like a short grocery visit, while lower values (<100) picked up traffic lights and stop signs while driving as significant locations. We recommend you set EPS based on your location data\u2019s accuracy (the more accurate your data is, the lower you should be able to set EPS). Duration Calculation To calculate the time duration component for our features, we compute the difference between consecutive rows\u2019 timestamps to take into account sampling rate variability. If this time difference is larger than a threshold (300 seconds by default), we replace it with NA and label that row as Moving. Home location DORYAB_STRATEGY : home is calculated using all location data of a participant between 12 am and 6 am, then applying a clustering algorithm ( DBSCAN or OPTICS ) and considering the center of the biggest cluster home for that participant. SUN_LI_VEGA_STRATEGY : home is calculated using all location data of a participant between 12 am and 6 am, then applying a clustering algorithm ( DBSCAN or OPTICS ). 
The following steps are used to infer the home location per day for that participant: if there are records within [03:30:00, 04:30:00] for that night: we choose the most common cluster during that period as a home candidate for that day. elif there are records within [midnight, 03:30:00) for that night: we choose the last valid cluster during that period as a home candidate for that day. elif there are records within (04:30:00, 06:00:00] for that night: we choose the first valid cluster during that period as a home candidate for that day. else: the home location is NA (missing) for that day. If the count of consecutive days with the same candidate home location cluster label is larger or equal to [MINIMUM_DAYS_TO_DETECT_HOME_CHANGES] , the candidate will be regarded as the home cluster; otherwise, the home cluster will be the last valid day\u2019s cluster. If there are no valid clusters before that day, the first home location in the days after is used.","title":"Phone Locations"},{"location":"features/phone-locations/#phone-locations","text":"Sensor parameters description for [PHONE_LOCATIONS] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the location data is stored [LOCATIONS_TO_USE] Type of location data to use, one of ALL , GPS , ALL_RESAMPLED or FUSED_RESAMPLED . This filter is based on the provider column of the locations table, ALL includes every row, GPS only includes rows where the provider is gps, ALL_RESAMPLED includes all rows after being resampled, and FUSED_RESAMPLED only includes rows where the provider is fused after being resampled. [FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD] if ALL_RESAMPLED or FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row is resampled to the next valid timestamp (see the Assumptions/Observations below) only if the time difference between them is less or equal than this threshold (in minutes). 
[FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION] if ALL_RESAMPLED or FUSED_RESAMPLED is used, the original fused data has to be resampled, a location row is resampled at most for this long (in minutes) Assumptions/Observations Types of location data to use Android and iOS clients can collect location coordinates through the phone\u2019s GPS, the network cellular towers around the phone, or Google\u2019s fused location API. If you want to use only the GPS provider, set [LOCATIONS_TO_USE] to GPS If you want to use all providers, set [LOCATIONS_TO_USE] to ALL If you collected location data from different providers, including the fused API, use ALL_RESAMPLED If your mobile client was configured to use fused location only or want to focus only on this provider, set [LOCATIONS_TO_USE] to RESAMPLE_FUSED . ALL_RESAMPLED and RESAMPLE_FUSED take the original location coordinates and replicate each pair forward in time as long as the phone was sensing data as indicated by the joined timestamps of [PHONE_DATA_YIELD][SENSORS] . This is done because Google\u2019s API only logs a new location coordinate pair when it is sufficiently different in time or space from the previous one and because GPS and network providers can log data at variable rates. There are two parameters associated with resampling fused location. FUSED_RESAMPLED_CONSECUTIVE_THRESHOLD (in minutes, default 30) controls the maximum gap between any two coordinate pairs to replicate the last known pair. For example, participant A\u2019s phone did not collect data between 10.30 am and 10:50 am and between 11:05am and 11:40am, the last known coordinate pair is replicated during the first period but not the second. In other words, we assume that we cannot longer guarantee the participant stayed at the last known location if the phone did not sense data for more than 30 minutes. 
FUSED_RESAMPLED_TIME_SINCE_VALID_LOCATION (in minutes, default 720 or 12 hours) stops the last known fused location from being replicated longer than this threshold even if the phone was sensing data continuously. For example, participant A went home at 9 pm, and their phone was sensing data without gaps until 11 am the next morning, the last known location is replicated until 9 am. If you have suggestions to modify or improve this resampling, let us know.","title":"Phone Locations"},{"location":"features/phone-locations/#barnett-provider","text":"These features are based on the original open-source implementation by Barnett et al and some features created by Canzian et al . Available time segments and platforms Available only for segments that start at 00:00:00 and end at 23:59:59 of the same or a different day (daily, weekly, weekend, etc.) Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the BARNETT provider [FEATURES] Features to be computed, see table below [ACCURACY_LIMIT] An integer in meters, any location rows with an accuracy higher than this is dropped. This number means there\u2019s a 68% probability the actual location is within this radius [IF_MULTIPLE_TIMEZONES] Currently, USE_MOST_COMMON is the only value supported. 
If the location data for a participant belongs to multiple time zones, we select the most common because Barnett\u2019s algorithm can only handle one time zone [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. Features description for [PHONE_LOCATIONS][PROVIDERS][BARNETT] adapted from Beiwe Summary Statistics : Feature Units Description hometime minutes Time at home. Time spent at home in minutes. Home is the most visited significant location between 8 pm and 8 am, including any pauses within a 200-meter radius. disttravelled meters Total distance traveled over a day (flights). rog meters The Radius of Gyration (rog) is a measure in meters of the area covered by a person over a day. A centroid is calculated for all the places (pauses) visited during a day, and a weighted distance between all the places and that centroid is computed. The weights are proportional to the time spent in each place. maxdiam meters The maximum diameter is the largest distance between any two pauses. maxhomedist meters The maximum distance from home in meters. siglocsvisited locations The number of significant locations visited during the day. Significant locations are computed using k-means clustering over pauses found in the whole monitoring period. The number of clusters is found iterating k from 1 to 200 stopping until the centroids of two significant locations are within 400 meters of one another. avgflightlen meters Mean length of all flights. stdflightlen meters Standard deviation of the length of all flights. avgflightdur seconds Mean duration of all flights. 
stdflightdur seconds The standard deviation of the duration of all flights. probpause - The fraction of a day spent in a pause (as opposed to a flight) siglocentropy nats Shannon\u2019s entropy measurement is based on the proportion of time spent at each significant location visited during a day. circdnrtn - A continuous metric quantifying a person\u2019s circadian routine that can take any value between 0 and 1, where 0 represents a daily routine completely different from any other sensed days and 1 a routine the same as every other sensed day. wkenddayrtn - Same as circdnrtn but computed separately for weekends and weekdays. Assumptions/Observations Multi day segment features Barnett\u2019s features are only available on time segments that span entire days (00:00:00 to 23:59:59). Such segments can be one-day long (daily) or multi-day (weekly, for example). Multi-day segment features are computed based on daily features summarized the following way: sum for hometime , disttravelled , siglocsvisited , and minutes_data_used max for maxdiam , and maxhomedist mean for rog , avgflightlen , stdflightlen , avgflightdur , stdflightdur , probpause , siglocentropy , circdnrtn , wkenddayrtn , and minsmissing Computation speed The process to extract these features can be slow compared to other sensors and providers due to the required simulation. How are these features computed? These features are based on a Pause-Flight model. A pause is defined as a mobility trace (location pings) within a certain duration and distance (by default, 300 seconds and 60 meters). A flight is any mobility trace between two pauses. Data is resampled and imputed before the features are computed. See Barnett et al for more information. In RAPIDS, we only expose one parameter for these features (accuracy limit). You can change other parameters in src/features/phone_locations/barnett/library/MobilityFeatures.R . 
Significant Locations Significant locations are determined using K-means clustering on pauses longer than 10 minutes. The number of clusters (K) is increased until no two clusters are within 400 meters from each other. After this, pauses within a certain range of a cluster (200 meters by default) count as a visit to that significant location. This description was adapted from the Supplementary Materials of Barnett et al . The Circadian Calculation For a detailed description of how this is calculated, see Canzian et al .","title":"BARNETT provider"},{"location":"features/phone-locations/#doryab-provider","text":"These features are based on the original implementation by Doryab et al. . Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_locations_raw.csv - data/interim/ { pid } /phone_locations_processed.csv - data/interim/ { pid } /phone_locations_processed_with_datetime.csv - data/interim/ { pid } /phone_locations_processed_with_datetime_with_doryab_columns.csv - data/interim/ { pid } /phone_locations_features/phone_locations_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_locations.csv Parameters description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Key Description [COMPUTE] Set to True to extract PHONE_LOCATIONS features from the BARNETT provider [FEATURES] Features to be computed, see table below [ACCURACY_LIMIT] An integer in meters, any location rows with an accuracy higher than this will be dropped. This number means there\u2019s a 68% probability the true location is within this radius [DBSCAN_EPS] The maximum distance in meters between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. 
[DBSCAN_MINSAMPLES] The number of samples (or total weight) in a neighborhood for a point to be considered as a core point of a cluster. This includes the point itself. [THRESHOLD_STATIC] It is the threshold value in km/hr which labels a row as Static or Moving. [MAXIMUM_ROW_GAP] The maximum gap (in seconds) allowed between any two consecutive rows for them to be considered part of the same displacement. If this threshold is too high, it can throw speed and distance calculations off for periods when the phone was not sensing. This value must be larger than your GPS sampling interval when [LOCATIONS_TO_USE] is ALL or GPS , otherwise all the stationary-related features will be NA. If [LOCATIONS_TO_USE] is ALL_RESAMPLED or FUSED_RESAMPLED , you can use the default value as every row will be resampled at 1-minute intervals. [MINUTES_DATA_USED] Set to True to include an extra column in the final location feature file containing the number of minutes used to compute the features on each time segment. Use this for quality control purposes; the more data minutes exist for a period, the more reliable its features should be. For fused location, a single minute can contain more than one coordinate pair if the participant is moving fast enough. [CLUSTER_ON] Set this flag to PARTICIPANT_DATASET to create clusters based on the entire participant\u2019s dataset or to TIME_SEGMENT to create clusters based on all the instances of the corresponding time segment (e.g. all mornings) or to TIME_SEGMENT_INSTANCE to create clusters based on a single instance (e.g. 2020-05-20\u2019s morning). [INFER_HOME_LOCATION_STRATEGY] The strategy applied to infer home locations. Set to DORYAB_STRATEGY to infer one home location for the entire dataset of each participant or to SUN_LI_VEGA_STRATEGY to infer one home location per day per participant. See Observations below to know more. 
[MINIMUM_DAYS_TO_DETECT_HOME_CHANGES] The minimum number of consecutive days a new home location candidate has to repeat before it is considered the participant\u2019s new home. This parameter will be used only when [INFER_HOME_LOCATION_STRATEGY] is set to SUN_LI_VEGA_STRATEGY . [CLUSTERING_ALGORITHM] The original Doryab et al. implementation uses DBSCAN , OPTICS is also available with similar (but not identical) clustering results and lower memory consumption. [RADIUS_FOR_HOME] All location coordinates within this distance (meters) from the home location coordinates are considered a homestay (see timeathome feature). Features description for [PHONE_LOCATIONS][PROVIDERS][DORYAB] : Feature Units Description locationvariance \\(meters^2\\) The sum of the variances of the latitude and longitude columns. loglocationvariance - Log of the sum of the variances of the latitude and longitude columns. totaldistance meters Total distance traveled in a time segment using the haversine formula. avgspeed km/hr Average speed in a time segment considering only the instances labeled as Moving. varspeed km/hr Speed variance in a time segment considering only the instances labeled as Moving. circadianmovement - Deprecated, see Observations below. \u201cIt encodes the extent to which a person\u2019s location patterns follow a 24-hour circadian cycle.\" Doryab et al. . numberofsignificantplaces places Number of significant locations visited. It is calculated using the DBSCAN/OPTICS clustering algorithm which takes in EPS and MIN_SAMPLES as parameters to identify clusters. Each cluster is a significant place. numberlocationtransitions transitions Number of movements between any two clusters in a time segment. radiusgyration meters Quantifies the area covered by a participant timeattop1location minutes Time spent at the most significant location. timeattop2location minutes Time spent at the 2 nd most significant location. 
timeattop3location minutes Time spent at the 3 rd most significant location. movingtostaticratio - Ratio between stationary time and total location sensed time. A lat/long coordinate pair is labeled as stationary if its speed (distance/time) to the next coordinate pair is less than 1km/hr. A higher value represents a more stationary routine. outlierstimepercent - Ratio between the time spent in non-significant clusters divided by the time spent in all clusters (stationary time. Only stationary samples are clustered). A higher value represents more time spent in non-significant clusters. maxlengthstayatclusters minutes Maximum time spent in a cluster (significant location). minlengthstayatclusters minutes Minimum time spent in a cluster (significant location). avglengthstayatclusters minutes Average time spent in a cluster (significant location). stdlengthstayatclusters minutes Standard deviation of time spent in a cluster (significant location). locationentropy nats Shannon Entropy computed over the row count of each cluster (significant location), it is higher the more rows belong to a cluster (i.e., the more time a participant spent at a significant location). normalizedlocationentropy nats Shannon Entropy computed over the row count of each cluster (significant location) divided by the number of clusters; it is higher the more rows belong to a cluster (i.e., the more time a participant spent at a significant location). timeathome minutes Time spent at home (see Observations below for a description on how we compute home). homelabel - An integer that represents a different home location. It will be a constant number (1) for all participants when [INFER_HOME_LOCATION_STRATEGY] is set to DORYAB_STRATEGY or an incremental index if the strategy is set to SUN_LI_VEGA_STRATEGY . 
Assumptions/Observations Significant Locations Identified Significant locations are determined using DBSCAN clustering on locations that a patient visit over the course of the period of data collection. Circadian Movement Calculation Note Feb 3 2021. It seems the implementation of this feature is not correct; we suggest not to use this feature until a fix is in place. For a detailed description of how this should be calculated, see Saeb et al . Fine-Tuning Clustering Parameters Based on an experiment where we collected fused location data for 7 days with a mean accuracy of 86 & SD of 350.874635, we determined that EPS/MAX_EPS =100 produced closer clustering results to reality. Higher values (>100) missed out on some significant places, like a short grocery visit, while lower values (<100) picked up traffic lights and stop signs while driving as significant locations. We recommend you set EPS based on your location data\u2019s accuracy (the more accurate your data is, the lower you should be able to set EPS). Duration Calculation To calculate the time duration component for our features, we compute the difference between consecutive rows\u2019 timestamps to take into account sampling rate variability. If this time difference is larger than a threshold (300 seconds by default), we replace it with NA and label that row as Moving. Home location DORYAB_STRATEGY : home is calculated using all location data of a participant between 12 am and 6 am, then applying a clustering algorithm ( DBSCAN or OPTICS ) and considering the center of the biggest cluster home for that participant. SUN_LI_VEGA_STRATEGY : home is calculated using all location data of a participant between 12 am and 6 am, then applying a clustering algorithm ( DBSCAN or OPTICS ). 
The following steps are used to infer the home location per day for that participant: if there are records within [03:30:00, 04:30:00] for that night: we choose the most common cluster during that period as a home candidate for that day. elif there are records within [midnight, 03:30:00) for that night: we choose the last valid cluster during that period as a home candidate for that day. elif there are records within (04:30:00, 06:00:00] for that night: we choose the first valid cluster during that period as a home candidate for that day. else: the home location is NA (missing) for that day. If the count of consecutive days with the same candidate home location cluster label is larger or equal to [MINIMUM_DAYS_TO_DETECT_HOME_CHANGES] , the candidate will be regarded as the home cluster; otherwise, the home cluster will be the last valid day\u2019s cluster. If there are no valid clusters before that day, the first home location in the days after is used.","title":"DORYAB provider"},{"location":"features/phone-log/","text":"Phone Log \u00b6 Sensor parameters description for [PHONE_LOG] : Key Description [CONTAINER][ANDROID] Data stream container (database table, CSV file, etc.) where a data log is stored for Android devices [CONTAINER][IOS] Data stream container (database table, CSV file, etc.) where a data log is stored for iOS devices Note No feature providers have been implemented for this sensor yet, however you can use its key ( PHONE_LOG ) to improve PHONE_DATA_YIELD or you can implement your own features .","title":"Phone Log"},{"location":"features/phone-log/#phone-log","text":"Sensor parameters description for [PHONE_LOG] : Key Description [CONTAINER][ANDROID] Data stream container (database table, CSV file, etc.) where a data log is stored for Android devices [CONTAINER][IOS] Data stream container (database table, CSV file, etc.) 
where a data log is stored for iOS devices Note No feature providers have been implemented for this sensor yet, however you can use its key ( PHONE_LOG ) to improve PHONE_DATA_YIELD or you can implement your own features .","title":"Phone Log"},{"location":"features/phone-messages/","text":"Phone Messages \u00b6 Sensor parameters description for [PHONE_MESSAGES] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the messages data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_messages_raw.csv - data/raw/ { pid } /phone_messages_with_datetime.csv - data/interim/ { pid } /phone_messages_features/phone_messages_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_messages.csv Parameters description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_MESSAGES features from the RAPIDS provider [MESSAGES_TYPES] The messages_type that will be analyzed. The options for this parameter are received or sent . [FEATURES] Features to be computed, see table below for [MESSAGES_TYPES] received and sent Features description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Feature Units Description count messages Number of messages of type messages_type that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular messages_type during a particular time_segment . timefirstmessages minutes Number of minutes between 12:00am (midnight) and the first message of a particular messages_type during a particular time_segment . timelastmessages minutes Number of minutes between 12:00am (midnight) and the last message of a particular messages_type during a particular time_segment . 
countmostfrequentcontact messages Number of messages from the contact with the most messages of messages_type during a time_segment throughout the whole dataset of each participant. Assumptions/Observations [MESSAGES_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [MESSAGES_TYPES] sent matches the [FEATURES] key sent","title":"Phone Messages"},{"location":"features/phone-messages/#phone-messages","text":"Sensor parameters description for [PHONE_MESSAGES] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the messages data is stored","title":"Phone Messages"},{"location":"features/phone-messages/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_messages_raw.csv - data/raw/ { pid } /phone_messages_with_datetime.csv - data/interim/ { pid } /phone_messages_features/phone_messages_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_messages.csv Parameters description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_MESSAGES features from the RAPIDS provider [MESSAGES_TYPES] The messages_type that will be analyzed. The options for this parameter are received or sent . [FEATURES] Features to be computed, see table below for [MESSAGES_TYPES] received and sent Features description for [PHONE_MESSAGES][PROVIDERS][RAPIDS] : Feature Units Description count messages Number of messages of type messages_type that occurred during a particular time_segment . distinctcontacts contacts Number of distinct contacts that are associated with a particular messages_type during a particular time_segment . timefirstmessages minutes Number of minutes between 12:00am (midnight) and the first message of a particular messages_type during a particular time_segment . 
timelastmessages minutes Number of minutes between 12:00am (midnight) and the last message of a particular messages_type during a particular time_segment . countmostfrequentcontact messages Number of messages from the contact with the most messages of messages_type during a time_segment throughout the whole dataset of each participant. Assumptions/Observations [MESSAGES_TYPES] and [FEATURES] keys in config.yaml need to match. For example, [MESSAGES_TYPES] sent matches the [FEATURES] key sent","title":"RAPIDS provider"},{"location":"features/phone-screen/","text":"Phone Screen \u00b6 Sensor parameters description for [PHONE_SCREEN] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the screen data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_screen_raw.csv - data/raw/ { pid } /phone_screen_with_datetime.csv - data/interim/ { pid } /phone_screen_episodes.csv - data/interim/ { pid } /phone_screen_episodes_resampled.csv - data/interim/ { pid } /phone_screen_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_screen_features/phone_screen_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_screen.csv Parameters description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_SCREEN features from the RAPIDS provider [FEATURES] Features to be computed, see table below [REFERENCE_HOUR_FIRST_USE] The reference point from which firstuseafter is to be computed, default is midnight [IGNORE_EPISODES_SHORTER_THAN] Ignore episodes that are shorter than this threshold (minutes). Set to 0 to disable this filter. [IGNORE_EPISODES_LONGER_THAN] Ignore episodes that are longer than this threshold (minutes). Set to 0 to disable this filter. 
[EPISODE_TYPES] Currently we only support unlock episodes (from when the phone is unlocked until the screen is off) Features description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Feature Units Description sumduration minutes Total duration of all unlock episodes. maxduration minutes Longest duration of any unlock episode. minduration minutes Shortest duration of any unlock episode. avgduration minutes Average duration of all unlock episodes. stdduration minutes Standard deviation duration of all unlock episodes. countepisode episodes Number of all unlock episodes firstuseafter minutes Minutes until the first unlock episode. Assumptions/Observations In Android, lock events can happen right after an off event, after a few seconds of an off event, or never happen depending on the phone's settings, therefore, an unlock episode is defined as the time between an unlock and a off event. In iOS, on and off events do not exist, so an unlock episode is defined as the time between an unlock and a lock event. Events in iOS are recorded reliably albeit some duplicated lock events within milliseconds from each other, so we only keep consecutive unlock/lock pairs. In Android you cand find multiple consecutive unlock or lock events, so we only keep consecutive unlock/off pairs. In our experiments these cases are less than 10% of the screen events collected and this happens because ACTION_SCREEN_OFF and ACTION_SCREEN_ON are sent when the device becomes non-interactive which may have nothing to do with the screen turning off . In addition to unlock/off episodes, in Android it is possible to measure the time spent on the lock screen before an unlock event as well as the total screen time (i.e. ON to OFF ) but these are not implemented at the moment. We transform iOS screen events to match Android\u2019s format, we replace lock episodes with off episodes (2 with 0) in iOS. 
However, as mentioned above this is still computing unlock to lock episodes.","title":"Phone Screen"},{"location":"features/phone-screen/#phone-screen","text":"Sensor parameters description for [PHONE_SCREEN] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) where the screen data is stored","title":"Phone Screen"},{"location":"features/phone-screen/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_screen_raw.csv - data/raw/ { pid } /phone_screen_with_datetime.csv - data/interim/ { pid } /phone_screen_episodes.csv - data/interim/ { pid } /phone_screen_episodes_resampled.csv - data/interim/ { pid } /phone_screen_episodes_resampled_with_datetime.csv - data/interim/ { pid } /phone_screen_features/phone_screen_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_screen.csv Parameters description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_SCREEN features from the RAPIDS provider [FEATURES] Features to be computed, see table below [REFERENCE_HOUR_FIRST_USE] The reference point from which firstuseafter is to be computed, default is midnight [IGNORE_EPISODES_SHORTER_THAN] Ignore episodes that are shorter than this threshold (minutes). Set to 0 to disable this filter. [IGNORE_EPISODES_LONGER_THAN] Ignore episodes that are longer than this threshold (minutes). Set to 0 to disable this filter. [EPISODE_TYPES] Currently we only support unlock episodes (from when the phone is unlocked until the screen is off) Features description for [PHONE_SCREEN][PROVIDERS][RAPIDS] : Feature Units Description sumduration minutes Total duration of all unlock episodes. maxduration minutes Longest duration of any unlock episode. minduration minutes Shortest duration of any unlock episode. avgduration minutes Average duration of all unlock episodes. 
stdduration minutes Standard deviation duration of all unlock episodes. countepisode episodes Number of all unlock episodes firstuseafter minutes Minutes until the first unlock episode. Assumptions/Observations In Android, lock events can happen right after an off event, after a few seconds of an off event, or never happen depending on the phone's settings, therefore, an unlock episode is defined as the time between an unlock and a off event. In iOS, on and off events do not exist, so an unlock episode is defined as the time between an unlock and a lock event. Events in iOS are recorded reliably albeit some duplicated lock events within milliseconds from each other, so we only keep consecutive unlock/lock pairs. In Android you cand find multiple consecutive unlock or lock events, so we only keep consecutive unlock/off pairs. In our experiments these cases are less than 10% of the screen events collected and this happens because ACTION_SCREEN_OFF and ACTION_SCREEN_ON are sent when the device becomes non-interactive which may have nothing to do with the screen turning off . In addition to unlock/off episodes, in Android it is possible to measure the time spent on the lock screen before an unlock event as well as the total screen time (i.e. ON to OFF ) but these are not implemented at the moment. We transform iOS screen events to match Android\u2019s format, we replace lock episodes with off episodes (2 with 0) in iOS. However, as mentioned above this is still computing unlock to lock episodes.","title":"RAPIDS provider"},{"location":"features/phone-wifi-connected/","text":"Phone WiFi Connected \u00b6 Sensor parameters description for [PHONE_WIFI_CONNECTED] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the wifi (connected) data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_wifi_connected_raw.csv - data/raw/ { pid } /phone_wifi_connected_with_datetime.csv - data/interim/ { pid } /phone_wifi_connected_features/phone_wifi_connected_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_connected.csv Parameters description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_CONNECTED features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points connected during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access point during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A connected WiFI access point is one that a phone was connected to. By default AWARE stores this data in the sensor_wifi table.","title":"Phone WiFI Connected"},{"location":"features/phone-wifi-connected/#phone-wifi-connected","text":"Sensor parameters description for [PHONE_WIFI_CONNECTED] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the wifi (connected) data is stored","title":"Phone WiFi Connected"},{"location":"features/phone-wifi-connected/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android and iOS File Sequence - data/raw/ { pid } /phone_wifi_connected_raw.csv - data/raw/ { pid } /phone_wifi_connected_with_datetime.csv - data/interim/ { pid } /phone_wifi_connected_features/phone_wifi_connected_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_connected.csv Parameters description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_CONNECTED features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_CONNECTED][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points connected during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access point during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A connected WiFI access point is one that a phone was connected to. By default AWARE stores this data in the sensor_wifi table.","title":"RAPIDS provider"},{"location":"features/phone-wifi-visible/","text":"Phone WiFi Visible \u00b6 Sensor parameters description for [PHONE_WIFI_VISIBLE] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the wifi (visible) data is stored RAPIDS provider \u00b6 Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_wifi_visible_raw.csv - data/raw/ { pid } /phone_wifi_visible_with_datetime.csv - data/interim/ { pid } /phone_wifi_visible_features/phone_wifi_visible_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_visible.csv Parameters description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_VISIBLE features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points visible during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access point during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A visible WiFI access point is one that a phone sensed around itself but that it was not connected to. Due to API restrictions, this sensor is not available on iOS. By default AWARE stores this data in the wifi table.","title":"Phone WiFI Visible"},{"location":"features/phone-wifi-visible/#phone-wifi-visible","text":"Sensor parameters description for [PHONE_WIFI_VISIBLE] : Key Description [CONTAINER] Data stream container (database table, CSV file, etc.) 
where the wifi (visible) data is stored","title":"Phone WiFi Visible"},{"location":"features/phone-wifi-visible/#rapids-provider","text":"Available time segments and platforms Available for all time segments Available for Android only File Sequence - data/raw/ { pid } /phone_wifi_visible_raw.csv - data/raw/ { pid } /phone_wifi_visible_with_datetime.csv - data/interim/ { pid } /phone_wifi_visible_features/phone_wifi_visible_ { language } _ { provider_key } .csv - data/processed/features/ { pid } /phone_wifi_visible.csv Parameters description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Key Description [COMPUTE] Set to True to extract PHONE_WIFI_VISIBLE features from the RAPIDS provider [FEATURES] Features to be computed, see table below Features description for [PHONE_WIFI_VISIBLE][PROVIDERS][RAPIDS] : Feature Units Description countscans devices Number of scanned WiFi access points visible during a time_segment, an access point can be detected multiple times over time and these appearances are counted separately uniquedevices devices Number of unique access point during a time_segment as identified by their hardware address countscansmostuniquedevice scans Number of scans of the most scanned access point during a time_segment across the whole monitoring period Assumptions/Observations A visible WiFI access point is one that a phone sensed around itself but that it was not connected to. Due to API restrictions, this sensor is not available on iOS. By default AWARE stores this data in the wifi table.","title":"RAPIDS provider"},{"location":"setup/configuration/","text":"Configuration \u00b6 You need to follow these steps to configure your RAPIDS deployment before you can extract behavioral features. 
Verify RAPIDS can process your data streams Create your participants files Select what time segments you want to extract features on Select the timezone of your study Configure your data streams Select what sensors and features you want to process When you are done with this configuration, go to executing RAPIDS . Hint Every time you see config[\"KEY\"] or [KEY] in these docs, we are referring to the corresponding key in the config.yaml file. Supported data streams \u00b6 A data stream refers to sensor data collected using a specific device with a specific format and stored in a specific container . For example, the aware_mysql data stream handles smartphone data ( device ) collected with the AWARE Framework ( format ) stored in a MySQL database ( container ). Check the table in introduction to data streams to know what data streams we support. If your data stream is supported, continue to the next configuration section, you will use its label later in this guide (e.g. aware_mysql ). If your steam is not supported, but you want to implement it, follow the tutorial to add support for new data streams and open a new discussion in Github with any questions. Participant files \u00b6 Participant files link together multiple devices (smartphones and wearables) to specific participants and identify them throughout RAPIDS. You can create these files manually or automatically . Participant files are stored in data/external/participant_files/pxx.yaml and follow a unified structure . Remember to modify the config.yaml file with your PIDS The list PIDS in config.yaml needs to have the participant file names of the people you want to process. 
For example, if you created p01.yaml , p02.yaml and p03.yaml files in /data/external/participant_files/ , then PIDS should be: PIDS : [ p01 , p02 , p03 ] Optional: Migrating participants files with the old format If you were using the pre-release version of RAPIDS with participant files in plain text (as opposed to yaml), you could run the following command, and your old files will be converted into yaml files stored in data/external/participant_files/ python tools/update_format_participant_files.py Structure of participants files \u00b6 Example of the structure of a participant file In this example, the participant used an android phone, an ios phone, a Fitbit device, and an Empatica device throughout the study between April 23 rd , 2020, and October 28 th , 2020 If your participants didn\u2019t use a [PHONE] , [FITBIT] or [EMPATICA] device, it is not necessary to include that section in their participant file. In other words, you can analyze data from 1 or more devices per participant. PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 , dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ] PLATFORMS : [ android , ios ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 FITBIT : DEVICE_IDS : [ fitbit1 ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 EMPATICA : DEVICE_IDS : [ empatica1 ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 [PHONE] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each smartphone, you can have more than one for when participants changed phones in the middle of the study. [PLATFORMS] An array that specifies the OS of each smartphone in [DEVICE_IDS] , use a combination of android or ios (we support participants that changed platforms in the middle of your study!). You can set [PLATFORMS]: [infer] , and RAPIDS will infer them automatically (each phone data stream infer this differently, e.g., aware_mysql uses the aware_device table). 
[LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [END_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [FITBIT] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each Fitbit, you can have more than one in case the participant changed devices in the middle of the study. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [END_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [EMPATICA] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each Empatica device used by this participant. Since the most common use case involves having multiple zip files from a single device for each person, set this device id to an arbitrary string (we usually use their pid ) [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [END_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . 
Automatic creation of participant files \u00b6 You can use a CSV file with a row per participant to automatically create participant files. AWARE_DEVICE_TABLE was deprecated In previous versions of RAPIDS, you could create participant files automatically using the aware_device table. We deprecated this option, but you can still achieve the same results if you export the output of the following SQL query as a CSV file and follow the instructions below: SELECT device_id , device_id as fitbit_id , CONCAT ( \"p\" , _id ) as empatica_id , CONCAT ( \"p\" , _id ) as pid , if ( brand = \"iPhone\" , \"ios\" , \"android\" ) as platform , CONCAT ( \"p\" , _id ) as label , DATE_FORMAT ( FROM_UNIXTIME (( timestamp / 1000 ) - 86400 ), \"%Y-%m-%d\" ) as start_date , CURRENT_DATE as end_date from aware_device order by _id ; In your config.yaml : Set CSV_FILE_PATH to a CSV file path that complies with the specs described below Set the devices ( PHONE , FITBIT , EMPATICA ) [ADD] flag to TRUE depending on what devices you used in your study. CREATE_PARTICIPANT_FILES : CSV_FILE_PATH : \"your_path/to_your.csv\" PHONE_SECTION : ADD : TRUE # or FALSE IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE IGNORED_DEVICE_IDS : [] EMPATICA_SECTION : ADD : TRUE # or FALSE IGNORED_DEVICE_IDS : [] Your CSV file ( [CSV_FILE_PATH] ) should have the following columns (headers), but the values within each column can be empty: Column Description device_id Phone device id. Separate multiple ids with ; fitbit_id Fitbit device id. Separate multiple ids with ; empatica_id Empatica device id. 
Since the most common use case involves having various zip files from a single device for each person, set this device id to an arbitrary string (we usually use their pid ) pid Unique identifiers with the format pXXX (your participant files will be named with this string) platform Use android , ios or infer as explained above, separate values with ; label A human-readable string that is used in reports and visualizations. start_date A string with format YYY-MM-DD or YYYY-MM-DD HH:MM:SS . By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . end_date A string with format YYY-MM-DD or YYYY-MM-DD HH:MM:SS . By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . Example We added white spaces to this example to make it easy to read, but you don\u2019t have to. device_id ,fitbit_id, empatica_id ,pid ,label ,platform ,start_date ,end_date a748ee1a-1d0b-4ae9-9074-279a2b6ba524;dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ,fitbit1 , p01 ,p01 ,julio ,android;ios ,2020-01-01 ,2021-01-01 4c4cf7a1-0340-44bc-be0f-d5053bf7390c ,fitbit2 , p02 ,p02 ,meng ,ios ,2021-01-01 ,2022-01-01 Then run snakemake -j1 create_participants_files Time Segments \u00b6 Time segments (or epochs) are the time windows on which you want to extract behavioral features. For example, you might want to process data every day, every morning, or only during weekends. RAPIDS offers three categories of time segments that are flexible enough to cover most use cases: frequency (short time windows every day), periodic (arbitrary time windows on any day), and event (arbitrary time windows around events of interest). See also our examples . Frequency Segments These segments are computed every day, and all have the same duration (for example, 30 minutes). 
Set the following keys in your config.yaml TIME_SEGMENTS : &time_segments TYPE : FREQUENCY FILE : \"data/external/your_frequency_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE The file pointed by [TIME_SEGMENTS][FILE] should have the following format and only have 1 row. Column Description label A string that is used as a prefix in the name of your time segments length An integer representing the duration of your time segments in minutes Example label,length thirtyminutes,30 This configuration will compute 48 time segments for every day when any data from any participant was sensed. For example: start_time,length,label 00:00,30,thirtyminutes0000 00:30,30,thirtyminutes0001 01:00,30,thirtyminutes0002 01:30,30,thirtyminutes0003 ... Periodic Segments These segments can be computed every day or on specific days of the week, month, quarter, and year. Their minimum duration is 1 minute, but they can be as long as you want. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : PERIODIC FILE : \"data/external/your_periodic_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE If [INCLUDE_PAST_PERIODIC_SEGMENTS] is set to TRUE , RAPIDS will consider instances of your segments back enough in the past to include the first row of data of each participant. For example, if the first row of data from a participant happened on Saturday, March 7 th , 2020, and the requested segment duration is 7 days starting on every Sunday, the first segment to be considered would begin on Sunday, March 1 st if [INCLUDE_PAST_PERIODIC_SEGMENTS] is TRUE or on Sunday, March 8 th if FALSE . The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. 
It has to be unique between rows start_time A string with format HH:MM:SS representing the starting time of this segment on any day length A string representing the length of this segment. It can have one or more of the following strings XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S repeats_on One of the following options every_day , wday , qday , mday , and yday . The last four represent a week, quarter, month, and year day repeats_value An integer complementing repeats_on . If you set repeats_on to every_day , set this to 0 , otherwise 1-7 represent a wday starting from Mondays, 1-31 represent a mday , 1-91 represent a qday , and 1-366 represent a yday Example label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 morning,06:00:00,5H 59M 59S,every_day,0 afternoon,12:00:00,5H 59M 59S,every_day,0 evening,18:00:00,5H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 This configuration will create five segment instances ( daily , morning , afternoon , evening , night ) on any given day ( every_day set to 0). The daily segment will start at midnight and last 23:59:59 ; the other four segments will begin at 6am, 12pm, 6pm, and 12am, respectively, and last for 05:59:59 . Event segments These segments can be computed before or after an event of interest (defined as any UNIX timestamp). Their minimum duration is 1 minute, but they can be as long as you want. The start of each segment can be shifted backward or forwards from the specified timestamp. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : EVENT FILE : \"data/external/your_event_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. 
If labels are unique, every segment is independent; if two or more segments have the same label, their data will be grouped when computing auxiliary data for features like the most frequent contact for calls (the most frequent contact will be calculated across all these segments). There cannot be two overlapping event segments with the same label (RAPIDS will throw an error) event_timestamp A UNIX timestamp that represents the moment an event of interest happened (clinical relapse, survey, readmission, etc.). The corresponding time segment will be computed around this moment using length , shift , and shift_direction length A string representing the length of this segment. It can have one or more of the following keys XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S shift A string representing the time shift from event_timestamp . It can have one or more of the following keys XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S . Use this value to change the start of a segment with respect to its event_timestamp . For example, set this variable to 1H to create a segment that starts 1 hour from an event of interest ( shift_direction determines if it\u2019s before or after). shift_direction An integer representing whether the shift is before ( -1 ) or after ( 1 ) an event_timestamp device_id The device id (smartphone or Fitbit) to whom this segment belongs to. You have to create a line in this event segment file for each event of a participant that you want to analyze. 
If you have participants with multiple device ids, you can choose any of them Example label,event_timestamp,length,shift,shift_direction,device_id stress1,1587661220000,1H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress2,1587747620000,4H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress3,1587906020000,3H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress4,1584291600000,7H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress5,1588172420000,9H,5M,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587661220000,1H,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587747620000,1D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587906020000,7D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 This example will create eight segments for a single participant ( a748ee1a... ), five independent stressX segments with various lengths (1,4,3,7, and 9 hours). Segments stress1 , stress3 , and stress5 are shifted forwards by 5 minutes, and stress2 and stress4 are shifted backward by 4 hours (that is, if the stress4 event happened on March 15 th at 1pm EST ( 1584291600000 ), the time segment will start on that day at 9am and end at 4pm). The three mood segments are 1 hour, 1 day, and 7 days long and have no shift. In addition, these mood segments are grouped together, meaning that although RAPIDS will compute features on each one of them, some information for such computation will be extracted from all three segments, for example, the phone contact that called a participant the most, or the location clusters visited by a participant. Date time labels of event segments In the final feature file, you will find a row per event segment. The local_segment column of each row has a label , a start date-time string, and an end date-time string. weeklysurvey2060#2020-09-12 01 :00:00,2020-09-18 23 :59:59 All sensor data is always segmented based on timestamps, and the date-time strings are attached for informative purposes. For example, you can plot your features based on these strings. 
When you configure RAPIDS to work with a single time zone, such time zone code will be used to convert start/end timestamps (the ones you typed in the event segments file) into start/end date-time strings. However, when you configure RAPIDS to work with multiple time zones, RAPIDS will use the most common time zone across all devices of every participant to do the conversion. The most common time zone is the one in which a participant spent the most time. In practical terms, this means that the date-time strings of event segments that happened in uncommon time zones will have shifted start/end date-time labels. However, the data within each segment was correctly filtered based on timestamps. Segment Examples \u00b6 5-minutes Use the following Frequency segment file to create 288 (12 * 60 * 24) 5-minute segments starting from midnight of every day in your study label,length fiveminutes,5 Daily Use the following Periodic segment file to create daily segments starting from midnight of every day in your study label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 Morning Use the following Periodic segment file to create morning segments starting at 06:00:00 and ending at 11:59:59 of every day in your study label,start_time,length,repeats_on,repeats_value morning,06:00:00,5H 59M 59S,every_day,0 Overnight Use the following Periodic segment file to create overnight segments starting at 20:00:00 and ending at 07:59:59 (next day) of every day in your study label,start_time,length,repeats_on,repeats_value morning,20:00:00,11H 59M 59S,every_day,0 Weekly Use the following Periodic segment file to create non-overlapping weekly segments starting at midnight of every Monday in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,wday,1 Use the following Periodic segment file to create overlapping weekly segments starting at midnight of every day in your study label,start_time,length,repeats_on,repeats_value 
weekly,00:00:00,6D 23H 59M 59S,every_day,0 Week-ends Use the following Periodic segment file to create week-end segments starting at midnight of every Saturday in your study label,start_time,length,repeats_on,repeats_value weekend,00:00:00,1D 23H 59M 59S,wday,6 Around surveys Use the following Event segment file to create two 2-hour segments that start 1 hour before surveys answered by 3 participants label,event_timestamp,length,shift,shift_direction,device_id survey1,1587661220000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey2,1587747620000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey1,1587906020000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey2,1584291600000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey1,1588172420000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 survey2,1584291600000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 Timezone of your study \u00b6 Single timezone \u00b6 If your study only happened in a single time zone or you want to ignore short trips of your participants to different time zones, select the appropriate code from this list and change the following config key. Double-check your timezone code pick; for example, US Eastern Time is America/New_York , not EST . TIMEZONE : TYPE : SINGLE TZCODE : America/New_York Multiple timezones \u00b6 If your participants lived in different time zones or they traveled across time zones, and you know when participants\u2019 devices were in a specific time zone, RAPIDS can use this data to process your data streams with the correct date-time. You need to provide RAPIDS with the time zone data in a CSV file ( [TZCODES_FILE] ) in the format described below. 
TIMEZONE : TYPE : MULTIPLE SINGLE : TZCODE : America/New_York MULTIPLE : TZCODES_FILE : path_to/time_zones_csv.file IF_MISSING_TZCODE : STOP DEFAULT_TZCODE : America/New_York FITBIT : ALLOW_MULTIPLE_TZ_PER_DEVICE : False INFER_FROM_SMARTPHONE_TZ : False Parameters for [TIMEZONE] Parameter Description [TYPE] Either SINGLE or MULTIPLE as explained above [SINGLE][TZCODE] The time zone code from this list to be used across all devices [MULTIPLE][TZCODES_FILE] A CSV file containing the time zones in which participants\u2019 devices sensed data (see the required format below). Multiple devices can be linked to the same person. Read more in Participants Files [MULTIPLE][IF_MISSING_TZCODE] When a device is missing from [TZCODES_FILE] Set this flag to STOP to stop RAPIDS execution and show an error, or to USE_DEFAULT to assign the time zone specified in [DEFAULT_TZCODE] to any such devices [MULTIPLE][FITBIT][ALLOW_MULTIPLE_TZ_PER_DEVICE] You only need to care about this flag if one or more Fitbit devices sensed data in one or more time zones, and you want RAPIDS to take into account this in its feature computation. Read more in \u201cHow does RAPIDS handle Fitbit devices?\u201d below. [MULTIPLE][FITBIT][INFER_FROM_SMARTPHONE_TZ] You only need to care about this flag if one or more Fitbit devices sensed data in one or more time zones, and you want RAPIDS to take into account this in its feature computation. Read more in \u201cHow does RAPIDS handle Fitbit devices?\u201d below. 
Format of TZCODES_FILE TZCODES_FILE has three columns and a row for each time zone a device visited (a device can be a smartphone or wearable (Fitbit/Empatica)): Column Description device_id A string that uniquely identifies a smartphone or wearable tzcode A string with the appropriate code from this list that represents the time zone where the device sensed data timestamp A UNIX timestamp indicating when was the first time this device_id sensed data in tzcode device_id, tzcode, timestamp 13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/New_York, 1587500000000 13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Mexico_City, 1587600000000 13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Los_Angeles, 1587700000000 65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam, 1587100000000 65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Berlin, 1587200000000 65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam, 1587300000000 Using this file, RAPDIS will create time zone intervals per device, for example for 13dbc8a3-dae3-4834-823a-4bc96a7d459d : Interval 1 [1587500000000, 1587599999999] for America/New_York Interval 2 [1587600000000, 1587699999999] for America/Mexico_City Interval 3 [1587700000000, now] for America/Los_Angeles Any sensor data row from a device will be assigned a timezone if it falls within that interval, for example: A screen row sensed at 1587533333333 will be assigned to America/New_York because it falls within Interval 1 A screen row sensed at 1587400000000 will be discarded because it was logged outside any interval. Can I get the TZCODES_FILE from the time zone table collected automatically by the AWARE app? Sure. You can put your timezone table ( timezone.csv ) collected by the AWARE app under data/external folder and run: python tools/create_multi_timezones_file.py The TZCODES_FILE will be saved as data/external/multiple_timezones.csv . 
What happens if participant X lives in Los Angeles but participant Y lives in Amsterdam and they both stayed there during my study? Add a row per participant and set timestamp to 0 : device_id, tzcode, timestamp 13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Los_Angeles, 0 65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam, 0 What happens if I forget to add a timezone for one or more devices? It depends on [IF_MISSING_TZCODE] . If [IF_MISSING_TZCODE] is set to STOP , RAPIDS will stop its execution and show you an error message. If [IF_MISSING_TZCODE] is set to USE_DEFAULT , it will assign the time zone specified in [DEFAULT_TZCODE] to any devices with missing time zone information in [TZCODES_FILE] . This is helpful if only a few of your participants had multiple timezones, and you don\u2019t want to specify the same time zone for the rest. How does RAPIDS handle Fitbit devices? Fitbit devices are not time zone aware, and they always log data with a local date-time string. When none of the Fitbit devices in your study changed time zones (e.g., p01 was always in New York and p02 was always in Amsterdam), you can set a single time zone per Fitbit device id along with a timestamp of 0 (you can still assign multiple time zones to smartphone device ids) device_id, tzcode, timestamp fitbit123, America/New_York, 0 fitbit999, Europe/Amsterdam, 0 On the other hand, when at least one of your Fitbit devices changed time zones AND you want RAPIDS to take into account these changes, you need to set [ALLOW_MULTIPLE_TZ_PER_DEVICE] to True . You have to manually allow this option because you need to be aware it can produce inaccurate features around the times when time zones changed . This is because we cannot know precisely when the Fitbit device detected and processed the time zone change. If you want to ALLOW_MULTIPLE_TZ_PER_DEVICE , you will need to add any time zone changes per device in the TZCODES_FILE as explained above. 
You could obtain this data by hand, but if your participants also used a smartphone during your study, you can use their time zone logs. Recall that in RAPIDS, every participant is represented with a participant file pXX.yaml , this file links together multiple devices, and we will use it to know what smartphone time zone data should be applied to Fitbit devices. Thus set INFER_FROM_SMARTPHONE_TZ to TRUE , if you have included smartphone time zone data in your TZCODE_FILE and want to make a participant\u2019s Fitbit data time zone aware with their respective smartphone data. Data Stream Configuration \u00b6 Modify the following keys in your config.yaml depending on the data stream you want to process. Phone Set [PHONE_DATA_STREAMS][TYPE] to the smartphone data stream you want to process (e.g. aware_mysql ) and configure its parameters (e.g. [DATABASE_GROUP] ). Ignore the parameters of streams you are not using (e.g. [FOLDER] of aware_csv ). PHONE_DATA_STREAMS : USE : aware_mysql # AVAILABLE: aware_mysql : DATABASE_GROUP : MY_GROUP aware_csv : FOLDER : data/external/aware_csv aware_mysql Key Description [DATABASE_GROUP] A database credentials group. Read the instructions below to set it up Setting up a DATABASE_GROUP and its connection credentials. If you haven\u2019t done so, create an empty file called credentials.yaml in your RAPIDS root directory: Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database): MY_GROUP : database : MY_DATABASE host : MY_HOST password : MY_PASSWORD port : 3306 user : MY_USER Notes The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use. Indentation matters You can have more than one credentials group in credentials.yaml Upgrading from ./.env from RAPIDS 0.x In RAPIDS versions 0.x, database credentials were stored in a ./.env file. 
If you are migrating from that type of file, you have two options: Migrate your credentials by hand: change .env format [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE to credentials.yaml format MY_GROUP : user : MY_USER password : MY_PASSWORD host : MY_HOST port : 3306 database : MY_DATABASE Use the migration script we provide (make sure your conda environment is active): python tools / update_format_env . py Connecting to localhost (host machine) from inside our docker container. If you are using RAPIDS\u2019 docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost . In a Linux host, you need to run our docker container using docker run --network=\"host\" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine. aware_csv Key Description [FOLDER] Folder where you have to place a CSV file per phone sensor. Each file has to contain all the data from every participant you want to process. Fitbit Set [FITBIT_DATA_STREAMS][TYPE] to the Fitbit data stream you want to process (e.g. fitbitjson_mysql ) and configure its parameters (e.g. [DATABASE_GROUP] ). Ignore the parameters of the other streams you are not using (e.g. [FOLDER] of aware_csv ). Warning You will probably have to tell RAPIDS the name of the columns where you stored your Fitbit data. To do this, modify your chosen stream\u2019s format.yaml column mappings to match your raw data column names. 
FITBIT_DATA_STREAMS : USE : fitbitjson_mysql # AVAILABLE: fitbitjson_mysql : DATABASE_GROUP : MY_GROUP SLEEP_SUMMARY_LAST_NIGHT_END : 660 fitbitjson_csv : FOLDER : data/external/fitbit_csv SLEEP_SUMMARY_LAST_NIGHT_END : 660 fitbitparsed_mysql : DATABASE_GROUP : MY_GROUP SLEEP_SUMMARY_LAST_NIGHT_END : 660 fitbitparsed_csv : FOLDER : data/external/fitbit_csv SLEEP_SUMMARY_LAST_NIGHT_END : 660 fitbitjson_mysql This data stream processes Fitbit data inside a JSON column obtained from the Fitbit API and stored in a MySQL database. Read more about its column mappings and mutations in fitbitjson_mysql . Key Description [DATABASE_GROUP] A database credentials group. Read the instructions below to set it up [SLEEP_SUMMARY_LAST_NIGHT_END] Segments are assigned based on this parameter. Any sleep episodes that start between today\u2019s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow\u2019s LNE are regarded as today\u2019s sleep episodes. While today\u2019s bedtime is based on today\u2019s sleep episodes, today\u2019s wake time is based on yesterday\u2019s sleep episodes. Setting up a DATABASE_GROUP and its connection credentials. If you haven\u2019t done so, create an empty file called credentials.yaml in your RAPIDS root directory: Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database): MY_GROUP : database : MY_DATABASE host : MY_HOST password : MY_PASSWORD port : 3306 user : MY_USER Notes The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use. Indentation matters You can have more than one credentials group in credentials.yaml Upgrading from ./.env from RAPIDS 0.x In RAPIDS versions 0.x, database credentials were stored in a ./.env file. 
If you are migrating from that type of file, you have two options: Migrate your credentials by hand: change .env format [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE to credentials.yaml format MY_GROUP : user : MY_USER password : MY_PASSWORD host : MY_HOST port : 3306 database : MY_DATABASE Use the migration script we provide (make sure your conda environment is active): python tools / update_format_env . py Connecting to localhost (host machine) from inside our docker container. If you are using RAPIDS\u2019 docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost . In a Linux host, you need to run our docker container using docker run --network=\"host\" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine. fitbitjson_csv This data stream processes Fitbit data inside a JSON column obtained from the Fitbit API and stored in a CSV file. Read more about its column mappings and mutations in fitbitjson_csv . Key Description [FOLDER] Folder where you have to place a CSV file per Fitbit sensor. Each file has to contain all the data from every participant you want to process. [SLEEP_SUMMARY_LAST_NIGHT_END] Segments are assigned based on this parameter. Any sleep episodes that start between today\u2019s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow\u2019s LNE are regarded as today\u2019s sleep episodes. While today\u2019s bedtime is based on today\u2019s sleep episodes, today\u2019s wake time is based on yesterday\u2019s sleep episodes. fitbitparsed_mysql This data stream process Fitbit data stored in multiple columns after being parsed from the JSON column returned by Fitbit API and stored in a MySQL database. Read more about its column mappings and mutations in fitbitparsed_mysql . Key Description [DATABASE_GROUP] A database credentials group. 
Read the instructions below to set it up [SLEEP_SUMMARY_LAST_NIGHT_END] Segments are assigned based on this parameter. Any sleep episodes that start between today\u2019s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow\u2019s LNE are regarded as today\u2019s sleep episodes. While today\u2019s bedtime is based on today\u2019s sleep episodes, today\u2019s wake time is based on yesterday\u2019s sleep episodes. Setting up a DATABASE_GROUP and its connection credentials. If you haven\u2019t done so, create an empty file called credentials.yaml in your RAPIDS root directory: Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database): MY_GROUP : database : MY_DATABASE host : MY_HOST password : MY_PASSWORD port : 3306 user : MY_USER Notes The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use. Indentation matters You can have more than one credentials group in credentials.yaml Upgrading from ./.env from RAPIDS 0.x In RAPIDS versions 0.x, database credentials were stored in a ./.env file. If you are migrating from that type of file, you have two options: Migrate your credentials by hand: change .env format [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE to credentials.yaml format MY_GROUP : user : MY_USER password : MY_PASSWORD host : MY_HOST port : 3306 database : MY_DATABASE Use the migration script we provide (make sure your conda environment is active): python tools / update_format_env . py Connecting to localhost (host machine) from inside our docker container. If you are using RAPIDS\u2019 docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost . 
In a Linux host, you need to run our docker container using docker run --network=\"host\" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine. fitbitparsed_csv This data stream process Fitbit data stored in multiple columns (plain text) after being parsed from the JSON column returned by Fitbit API and stored in a CSV file. Read more about its column mappings and mutations in fitbitparsed_csv . Key Description [FOLDER] Folder where you have to place a CSV file per Fitbit sensor. Each file has to contain all the data from every participant you want to process. [SLEEP_SUMMARY_LAST_NIGHT_END] Segments are assigned based on this parameter. Any sleep episodes that start between today\u2019s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow\u2019s LNE are regarded as today\u2019s sleep episodes. While today\u2019s bedtime is based on today\u2019s sleep episodes, today\u2019s wake time is based on yesterday\u2019s sleep episodes. Empatica Set [USE] to the Empatica data stream you want to use; see the table in introduction to data streams . Configure any parameters as indicated below. EMPATICA_DATA_STREAMS : USE : empatica_zip # AVAILABLE: empatica_zip : FOLDER : data/external/empatica empatica_zip Key Description [FOLDER] The relative path to a folder containing one subfolder per participant. The name of a participant folder should match their device_id assigned in their participant file. Each participant folder can have one or more zip files with any name; in other words, the sensor data in those zip files belong to a single participant. The zip files are automatically generated by Empatica and have a CSV file per sensor ( ACC , HR , TEMP , EDA , BVP , TAGS ). All CSV files of the same type contained in one or more zip files are uncompressed, parsed, sorted by timestamp, and joined together. Example of an EMPATICA FOLDER In the file tree below, we want to process three participants\u2019 data: p01 , p02 , and p03 . 
p01 has two zip files, p02 has only one zip file, and p03 has three zip files. Each zip has a CSV file per sensor that is joined together and processed by RAPIDS. data/ # this folder exists in the root RAPIDS folder external/ empatica/ p01/ file1.zip file2.zip p02/ aaaa.zip p03/ t1.zip t2.zip t3.zip Sensor and Features to Process \u00b6 Finally, you need to modify the config.yaml section of the sensors you want to extract behavioral features from. All sensors follow the same naming nomenclature ( DEVICE_SENSOR ) and parameter structure which we explain in the Behavioral Features Introduction . Done Head over to Execution to learn how to execute RAPIDS.","title":"Configuration"},{"location":"setup/configuration/#configuration","text":"You need to follow these steps to configure your RAPIDS deployment before you can extract behavioral features. Verify RAPIDS can process your data streams Create your participants files Select what time segments you want to extract features on Select the timezone of your study Configure your data streams Select what sensors and features you want to process When you are done with this configuration, go to executing RAPIDS . Hint Every time you see config[\"KEY\"] or [KEY] in these docs, we are referring to the corresponding key in the config.yaml file.","title":"Configuration"},{"location":"setup/configuration/#supported-data-streams","text":"A data stream refers to sensor data collected using a specific device with a specific format and stored in a specific container . For example, the aware_mysql data stream handles smartphone data ( device ) collected with the AWARE Framework ( format ) stored in a MySQL database ( container ). Check the table in introduction to data streams to know what data streams we support. If your data stream is supported, continue to the next configuration section, you will use its label later in this guide (e.g. aware_mysql ). 
If your steam is not supported, but you want to implement it, follow the tutorial to add support for new data streams and open a new discussion in Github with any questions.","title":"Supported data streams"},{"location":"setup/configuration/#participant-files","text":"Participant files link together multiple devices (smartphones and wearables) to specific participants and identify them throughout RAPIDS. You can create these files manually or automatically . Participant files are stored in data/external/participant_files/pxx.yaml and follow a unified structure . Remember to modify the config.yaml file with your PIDS The list PIDS in config.yaml needs to have the participant file names of the people you want to process. For example, if you created p01.yaml , p02.yaml and p03.yaml files in /data/external/participant_files/ , then PIDS should be: PIDS : [ p01 , p02 , p03 ] Optional: Migrating participants files with the old format If you were using the pre-release version of RAPIDS with participant files in plain text (as opposed to yaml), you could run the following command, and your old files will be converted into yaml files stored in data/external/participant_files/ python tools/update_format_participant_files.py","title":"Participant files"},{"location":"setup/configuration/#structure-of-participants-files","text":"Example of the structure of a participant file In this example, the participant used an android phone, an ios phone, a Fitbit device, and an Empatica device throughout the study between April 23 rd , 2020, and October 28 th , 2020 If your participants didn\u2019t use a [PHONE] , [FITBIT] or [EMPATICA] device, it is not necessary to include that section in their participant file. In other words, you can analyze data from 1 or more devices per participant. 
PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 , dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ] PLATFORMS : [ android , ios ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 FITBIT : DEVICE_IDS : [ fitbit1 ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 EMPATICA : DEVICE_IDS : [ empatica1 ] LABEL : test01 START_DATE : 2020-04-23 END_DATE : 2020-10-28 [PHONE] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each smartphone, you can have more than one for when participants changed phones in the middle of the study. [PLATFORMS] An array that specifies the OS of each smartphone in [DEVICE_IDS] , use a combination of android or ios (we support participants that changed platforms in the middle of your study!). You can set [PLATFORMS]: [infer] , and RAPIDS will infer them automatically (each phone data stream infer this differently, e.g., aware_mysql uses the aware_device table). [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [END_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [FITBIT] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each Fitbit, you can have more than one in case the participant changed devices in the middle of the study. [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [END_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . 
Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [EMPATICA] Key Description [DEVICE_IDS] An array of the strings that uniquely identify each Empatica device used by this participant. Since the most common use case involves having multiple zip files from a single device for each person, set this device id to an arbitrary string (we usually use their pid ) [LABEL] A string that is used in reports and visualizations. [START_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . [END_DATE] A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS . Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 .","title":"Structure of participants files"},{"location":"setup/configuration/#automatic-creation-of-participant-files","text":"You can use a CSV file with a row per participant to automatically create participant files. AWARE_DEVICE_TABLE was deprecated In previous versions of RAPIDS, you could create participant files automatically using the aware_device table. 
We deprecated this option, but you can still achieve the same results if you export the output of the following SQL query as a CSV file and follow the instructions below: SELECT device_id , device_id as fitbit_id , CONCAT ( \"p\" , _id ) as empatica_id , CONCAT ( \"p\" , _id ) as pid , if ( brand = \"iPhone\" , \"ios\" , \"android\" ) as platform , CONCAT ( \"p\" , _id ) as label , DATE_FORMAT ( FROM_UNIXTIME (( timestamp / 1000 ) - 86400 ), \"%Y-%m-%d\" ) as start_date , CURRENT_DATE as end_date from aware_device order by _id ; In your config.yaml : Set CSV_FILE_PATH to a CSV file path that complies with the specs described below Set the devices ( PHONE , FITBIT , EMPATICA ) [ADD] flag to TRUE depending on what devices you used in your study. CREATE_PARTICIPANT_FILES : CSV_FILE_PATH : \"your_path/to_your.csv\" PHONE_SECTION : ADD : TRUE # or FALSE IGNORED_DEVICE_IDS : [] FITBIT_SECTION : ADD : TRUE # or FALSE IGNORED_DEVICE_IDS : [] EMPATICA_SECTION : ADD : TRUE # or FALSE IGNORED_DEVICE_IDS : [] Your CSV file ( [CSV_FILE_PATH] ) should have the following columns (headers), but the values within each column can be empty: Column Description device_id Phone device id. Separate multiple ids with ; fitbit_id Fitbit device id. Separate multiple ids with ; empatica_id Empatica device id. Since the most common use case involves having various zip files from a single device for each person, set this device id to an arbitrary string (we usually use their pid ) pid Unique identifiers with the format pXXX (your participant files will be named with this string) platform Use android , ios or infer as explained above, separate values with ; label A human-readable string that is used in reports and visualizations. start_date A string with format YYY-MM-DD or YYYY-MM-DD HH:MM:SS . By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . end_date A string with format YYY-MM-DD or YYYY-MM-DD HH:MM:SS . By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00 . 
Example We added white spaces to this example to make it easy to read, but you don\u2019t have to. device_id ,fitbit_id, empatica_id ,pid ,label ,platform ,start_date ,end_date a748ee1a-1d0b-4ae9-9074-279a2b6ba524;dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ,fitbit1 , p01 ,p01 ,julio ,android;ios ,2020-01-01 ,2021-01-01 4c4cf7a1-0340-44bc-be0f-d5053bf7390c ,fitbit2 , p02 ,p02 ,meng ,ios ,2021-01-01 ,2022-01-01 Then run snakemake -j1 create_participants_files","title":"Automatic creation of participant files"},{"location":"setup/configuration/#time-segments","text":"Time segments (or epochs) are the time windows on which you want to extract behavioral features. For example, you might want to process data every day, every morning, or only during weekends. RAPIDS offers three categories of time segments that are flexible enough to cover most use cases: frequency (short time windows every day), periodic (arbitrary time windows on any day), and event (arbitrary time windows around events of interest). See also our examples . Frequency Segments These segments are computed every day, and all have the same duration (for example, 30 minutes). Set the following keys in your config.yaml TIME_SEGMENTS : &time_segments TYPE : FREQUENCY FILE : \"data/external/your_frequency_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE The file pointed by [TIME_SEGMENTS][FILE] should have the following format and only have 1 row. Column Description label A string that is used as a prefix in the name of your time segments length An integer representing the duration of your time segments in minutes Example label,length thirtyminutes,30 This configuration will compute 48 time segments for every day when any data from any participant was sensed. For example: start_time,length,label 00:00,30,thirtyminutes0000 00:30,30,thirtyminutes0001 01:00,30,thirtyminutes0002 01:30,30,thirtyminutes0003 ... 
Periodic Segments These segments can be computed every day or on specific days of the week, month, quarter, and year. Their minimum duration is 1 minute, but they can be as long as you want. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : PERIODIC FILE : \"data/external/your_periodic_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE If [INCLUDE_PAST_PERIODIC_SEGMENTS] is set to TRUE , RAPIDS will consider instances of your segments back enough in the past to include the first row of data of each participant. For example, if the first row of data from a participant happened on Saturday, March 7 th , 2020, and the requested segment duration is 7 days starting on every Sunday, the first segment to be considered would begin on Sunday, March 1 st if [INCLUDE_PAST_PERIODIC_SEGMENTS] is TRUE or on Sunday, March 8 th if FALSE . The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. It has to be unique between rows start_time A string with format HH:MM:SS representing the starting time of this segment on any day length A string representing the length of this segment. It can have one or more of the following strings XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S repeats_on One of the following options every_day , wday , qday , mday , and yday . The last four represent a week, quarter, month, and year day repeats_value An integer complementing repeats_on . 
If you set repeats_on to every_day , set this to 0 , otherwise 1-7 represent a wday starting from Mondays, 1-31 represent a mday , 1-91 represent a qday , and 1-366 represent a yday Example label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 morning,06:00:00,5H 59M 59S,every_day,0 afternoon,12:00:00,5H 59M 59S,every_day,0 evening,18:00:00,5H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 This configuration will create five segment instances ( daily , morning , afternoon , evening , night ) on any given day ( every_day set to 0). The daily segment will start at midnight and last 23:59:59 ; the other four segments will begin at 6am, 12pm, 6pm, and 12am, respectively, and last for 05:59:59 . Event segments These segments can be computed before or after an event of interest (defined as any UNIX timestamp). Their minimum duration is 1 minute, but they can be as long as you want. The start of each segment can be shifted backward or forwards from the specified timestamp. Set the following keys in your config.yaml . TIME_SEGMENTS : &time_segments TYPE : EVENT FILE : \"data/external/your_event_segments.csv\" INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE # or TRUE The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows. Column Description label A string that is used as a prefix in the name of your time segments. If labels are unique, every segment is independent; if two or more segments have the same label, their data will be grouped when computing auxiliary data for features like the most frequent contact for calls (the most frequent contact will be calculated across all these segments). There cannot be two overlapping event segments with the same label (RAPIDS will throw an error) event_timestamp A UNIX timestamp that represents the moment an event of interest happened (clinical relapse, survey, readmission, etc.). 
The corresponding time segment will be computed around this moment using length , shift , and shift_direction length A string representing the length of this segment. It can have one or more of the following keys XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S shift A string representing the time shift from event_timestamp . It can have one or more of the following keys XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S . Use this value to change the start of a segment with respect to its event_timestamp . For example, set this variable to 1H to create a segment that starts 1 hour from an event of interest ( shift_direction determines if it\u2019s before or after). shift_direction An integer representing whether the shift is before ( -1 ) or after ( 1 ) an event_timestamp device_id The device id (smartphone or Fitbit) to whom this segment belongs to. You have to create a line in this event segment file for each event of a participant that you want to analyze. If you have participants with multiple device ids, you can choose any of them Example label,event_timestamp,length,shift,shift_direction,device_id stress1,1587661220000,1H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress2,1587747620000,4H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress3,1587906020000,3H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress4,1584291600000,7H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 stress5,1588172420000,9H,5M,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587661220000,1H,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587747620000,1D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 mood,1587906020000,7D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 This example will create eight segments for a single participant ( a748ee1a... ), five independent stressX segments with various lengths (1,4,3,7, and 9 hours). 
Segments stress1 , stress3 , and stress5 are shifted forwards by 5 minutes, and stress2 and stress4 are shifted backward by 4 hours (that is, if the stress4 event happened on March 15 th at 1pm EST ( 1584291600000 ), the time segment will start on that day at 9am and end at 4pm). The three mood segments are 1 hour, 1 day, and 7 days long and have no shift. In addition, these mood segments are grouped together, meaning that although RAPIDS will compute features on each one of them, some information for such computation will be extracted from all three segments, for example, the phone contact that called a participant the most, or the location clusters visited by a participant. Date time labels of event segments In the final feature file, you will find a row per event segment. The local_segment column of each row has a label , a start date-time string, and an end date-time string. weeklysurvey2060#2020-09-12 01 :00:00,2020-09-18 23 :59:59 All sensor data is always segmented based on timestamps, and the date-time strings are attached for informative purposes. For example, you can plot your features based on these strings. When you configure RAPIDS to work with a single time zone, such time zone code will be used to convert start/end timestamps (the ones you typed in the event segments file) into start/end date-time strings. However, when you configure RAPIDS to work with multiple time zones, RAPIDS will use the most common time zone across all devices of every participant to do the conversion. The most common time zone is the one in which a participant spent the most time. In practical terms, this means that the date-time strings of event segments that happened in uncommon time zones will have shifted start/end date-time labels. 
However, the data within each segment was correctly filtered based on timestamps.","title":"Time Segments"},{"location":"setup/configuration/#segment-examples","text":"5-minutes Use the following Frequency segment file to create 288 (12 * 60 * 24) 5-minute segments starting from midnight of every day in your study label,length fiveminutes,5 Daily Use the following Periodic segment file to create daily segments starting from midnight of every day in your study label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 Morning Use the following Periodic segment file to create morning segments starting at 06:00:00 and ending at 11:59:59 of every day in your study label,start_time,length,repeats_on,repeats_value morning,06:00:00,5H 59M 59S,every_day,0 Overnight Use the following Periodic segment file to create overnight segments starting at 20:00:00 and ending at 07:59:59 (next day) of every day in your study label,start_time,length,repeats_on,repeats_value morning,20:00:00,11H 59M 59S,every_day,0 Weekly Use the following Periodic segment file to create non-overlapping weekly segments starting at midnight of every Monday in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,wday,1 Use the following Periodic segment file to create overlapping weekly segments starting at midnight of every day in your study label,start_time,length,repeats_on,repeats_value weekly,00:00:00,6D 23H 59M 59S,every_day,0 Week-ends Use the following Periodic segment file to create week-end segments starting at midnight of every Saturday in your study label,start_time,length,repeats_on,repeats_value weekend,00:00:00,1D 23H 59M 59S,wday,6 Around surveys Use the following Event segment file to create two 2-hour segments that start 1 hour before surveys answered by 3 participants label,event_timestamp,length,shift,shift_direction,device_id survey1,1587661220000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 
survey2,1587747620000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524 survey1,1587906020000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey2,1584291600000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr survey1,1588172420000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3 survey2,1584291600000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3","title":"Segment Examples"},{"location":"setup/configuration/#timezone-of-your-study","text":"","title":"Timezone of your study"},{"location":"setup/configuration/#single-timezone","text":"If your study only happened in a single time zone or you want to ignore short trips of your participants to different time zones, select the appropriate code from this list and change the following config key. Double-check your timezone code pick; for example, US Eastern Time is America/New_York , not EST . TIMEZONE : TYPE : SINGLE TZCODE : America/New_York","title":"Single timezone"},{"location":"setup/configuration/#multiple-timezones","text":"If your participants lived in different time zones or they traveled across time zones, and you know when participants\u2019 devices were in a specific time zone, RAPIDS can use this data to process your data streams with the correct date-time. You need to provide RAPIDS with the time zone data in a CSV file ( [TZCODES_FILE] ) in the format described below. TIMEZONE : TYPE : MULTIPLE SINGLE : TZCODE : America/New_York MULTIPLE : TZCODES_FILE : path_to/time_zones_csv.file IF_MISSING_TZCODE : STOP DEFAULT_TZCODE : America/New_York FITBIT : ALLOW_MULTIPLE_TZ_PER_DEVICE : False INFER_FROM_SMARTPHONE_TZ : False Parameters for [TIMEZONE] Parameter Description [TYPE] Either SINGLE or MULTIPLE as explained above [SINGLE][TZCODE] The time zone code from this list to be used across all devices [MULTIPLE][TZCODES_FILE] A CSV file containing the time zones in which participants\u2019 devices sensed data (see the required format below). Multiple devices can be linked to the same person. 
Read more in Participants Files [MULTIPLE][IF_MISSING_TZCODE] When a device is missing from [TZCODES_FILE] Set this flag to STOP to stop RAPIDS execution and show an error, or to USE_DEFAULT to assign the time zone specified in [DEFAULT_TZCODE] to any such devices [MULTIPLE][FITBIT][ALLOW_MULTIPLE_TZ_PER_DEVICE] You only need to care about this flag if one or more Fitbit devices sensed data in one or more time zones, and you want RAPIDS to take into account this in its feature computation. Read more in \u201cHow does RAPIDS handle Fitbit devices?\u201d below. [MULTIPLE][FITBIT][INFER_FROM_SMARTPHONE_TZ] You only need to care about this flag if one or more Fitbit devices sensed data in one or more time zones, and you want RAPIDS to take into account this in its feature computation. Read more in \u201cHow does RAPIDS handle Fitbit devices?\u201d below. Format of TZCODES_FILE TZCODES_FILE has three columns and a row for each time zone a device visited (a device can be a smartphone or wearable (Fitbit/Empatica)): Column Description device_id A string that uniquely identifies a smartphone or wearable tzcode A string with the appropriate code from this list that represents the time zone where the device sensed data timestamp A UNIX timestamp indicating when was the first time this device_id sensed data in tzcode device_id, tzcode, timestamp 13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/New_York, 1587500000000 13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Mexico_City, 1587600000000 13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Los_Angeles, 1587700000000 65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam, 1587100000000 65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Berlin, 1587200000000 65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam, 1587300000000 Using this file, RAPDIS will create time zone intervals per device, for example for 13dbc8a3-dae3-4834-823a-4bc96a7d459d : Interval 1 [1587500000000, 1587599999999] for America/New_York Interval 2 [1587600000000, 
1587699999999] for America/Mexico_City Interval 3 [1587700000000, now] for America/Los_Angeles Any sensor data row from a device will be assigned a timezone if it falls within that interval, for example: A screen row sensed at 1587533333333 will be assigned to America/New_York because it falls within Interval 1 A screen row sensed at 1587400000000 will be discarded because it was logged outside any interval. Can I get the TZCODES_FILE from the time zone table collected automatically by the AWARE app? Sure. You can put your timezone table ( timezone.csv ) collected by the AWARE app under data/external folder and run: python tools/create_multi_timezones_file.py The TZCODES_FILE will be saved as data/external/multiple_timezones.csv . What happens if participant X lives in Los Angeles but participant Y lives in Amsterdam and they both stayed there during my study? Add a row per participant and set timestamp to 0 : device_id, tzcode, timestamp 13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Los_Angeles, 0 65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam, 0 What happens if I forget to add a timezone for one or more devices? It depends on [IF_MISSING_TZCODE] . If [IF_MISSING_TZCODE] is set to STOP , RAPIDS will stop its execution and show you an error message. If [IF_MISSING_TZCODE] is set to USE_DEFAULT , it will assign the time zone specified in [DEFAULT_TZCODE] to any devices with missing time zone information in [TZCODES_FILE] . This is helpful if only a few of your participants had multiple timezones, and you don\u2019t want to specify the same time zone for the rest. How does RAPIDS handle Fitbit devices? Fitbit devices are not time zone aware, and they always log data with a local date-time string. 
When none of the Fitbit devices in your study changed time zones (e.g., p01 was always in New York and p02 was always in Amsterdam), you can set a single time zone per Fitbit device id along with a timestamp of 0 (you can still assign multiple time zones to smartphone device ids) device_id, tzcode, timestamp fitbit123, America/New_York, 0 fitbit999, Europe/Amsterdam, 0 On the other hand, when at least one of your Fitbit devices changed time zones AND you want RAPIDS to take into account these changes, you need to set [ALLOW_MULTIPLE_TZ_PER_DEVICE] to True . You have to manually allow this option because you need to be aware it can produce inaccurate features around the times when time zones changed . This is because we cannot know precisely when the Fitbit device detected and processed the time zone change. If you want to ALLOW_MULTIPLE_TZ_PER_DEVICE , you will need to add any time zone changes per device in the TZCODES_FILE as explained above. You could obtain this data by hand, but if your participants also used a smartphone during your study, you can use their time zone logs. Recall that in RAPIDS, every participant is represented with a participant file pXX.yaml , this file links together multiple devices, and we will use it to know what smartphone time zone data should be applied to Fitbit devices. Thus set INFER_FROM_SMARTPHONE_TZ to TRUE , if you have included smartphone time zone data in your TZCODE_FILE and want to make a participant\u2019s Fitbit data time zone aware with their respective smartphone data.","title":"Multiple timezones"},{"location":"setup/configuration/#data-stream-configuration","text":"Modify the following keys in your config.yaml depending on the data stream you want to process. Phone Set [PHONE_DATA_STREAMS][TYPE] to the smartphone data stream you want to process (e.g. aware_mysql ) and configure its parameters (e.g. [DATABASE_GROUP] ). Ignore the parameters of streams you are not using (e.g. [FOLDER] of aware_csv ). 
PHONE_DATA_STREAMS : USE : aware_mysql # AVAILABLE: aware_mysql : DATABASE_GROUP : MY_GROUP aware_csv : FOLDER : data/external/aware_csv aware_mysql Key Description [DATABASE_GROUP] A database credentials group. Read the instructions below to set it up Setting up a DATABASE_GROUP and its connection credentials. If you haven\u2019t done so, create an empty file called credentials.yaml in your RAPIDS root directory: Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database): MY_GROUP : database : MY_DATABASE host : MY_HOST password : MY_PASSWORD port : 3306 user : MY_USER Notes The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use. Indentation matters You can have more than one credentials group in credentials.yaml Upgrading from ./.env from RAPIDS 0.x In RAPIDS versions 0.x, database credentials were stored in a ./.env file. If you are migrating from that type of file, you have two options: Migrate your credentials by hand: change .env format [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE to credentials.yaml format MY_GROUP : user : MY_USER password : MY_PASSWORD host : MY_HOST port : 3306 database : MY_DATABASE Use the migration script we provide (make sure your conda environment is active): python tools / update_format_env . py Connecting to localhost (host machine) from inside our docker container. If you are using RAPIDS\u2019 docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost . In a Linux host, you need to run our docker container using docker run --network=\"host\" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine. aware_csv Key Description [FOLDER] Folder where you have to place a CSV file per phone sensor. 
Each file has to contain all the data from every participant you want to process. Fitbit Set [FITBIT_DATA_STREAMS][TYPE] to the Fitbit data stream you want to process (e.g. fitbitjson_mysql ) and configure its parameters (e.g. [DATABASE_GROUP] ). Ignore the parameters of the other streams you are not using (e.g. [FOLDER] of aware_csv ). Warning You will probably have to tell RAPIDS the name of the columns where you stored your Fitbit data. To do this, modify your chosen stream\u2019s format.yaml column mappings to match your raw data column names. FITBIT_DATA_STREAMS : USE : fitbitjson_mysql # AVAILABLE: fitbitjson_mysql : DATABASE_GROUP : MY_GROUP SLEEP_SUMMARY_LAST_NIGHT_END : 660 fitbitjson_csv : FOLDER : data/external/fitbit_csv SLEEP_SUMMARY_LAST_NIGHT_END : 660 fitbitparsed_mysql : DATABASE_GROUP : MY_GROUP SLEEP_SUMMARY_LAST_NIGHT_END : 660 fitbitparsed_csv : FOLDER : data/external/fitbit_csv SLEEP_SUMMARY_LAST_NIGHT_END : 660 fitbitjson_mysql This data stream processes Fitbit data inside a JSON column obtained from the Fitbit API and stored in a MySQL database. Read more about its column mappings and mutations in fitbitjson_mysql . Key Description [DATABASE_GROUP] A database credentials group. Read the instructions below to set it up [SLEEP_SUMMARY_LAST_NIGHT_END] Segments are assigned based on this parameter. Any sleep episodes that start between today\u2019s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow\u2019s LNE are regarded as today\u2019s sleep episodes. While today\u2019s bedtime is based on today\u2019s sleep episodes, today\u2019s wake time is based on yesterday\u2019s sleep episodes. Setting up a DATABASE_GROUP and its connection credentials. 
If you haven\u2019t done so, create an empty file called credentials.yaml in your RAPIDS root directory: Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database): MY_GROUP : database : MY_DATABASE host : MY_HOST password : MY_PASSWORD port : 3306 user : MY_USER Notes The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use. Indentation matters You can have more than one credentials group in credentials.yaml Upgrading from ./.env from RAPIDS 0.x In RAPIDS versions 0.x, database credentials were stored in a ./.env file. If you are migrating from that type of file, you have two options: Migrate your credentials by hand: change .env format [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE to credentials.yaml format MY_GROUP : user : MY_USER password : MY_PASSWORD host : MY_HOST port : 3306 database : MY_DATABASE Use the migration script we provide (make sure your conda environment is active): python tools / update_format_env . py Connecting to localhost (host machine) from inside our docker container. If you are using RAPIDS\u2019 docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost . In a Linux host, you need to run our docker container using docker run --network=\"host\" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine. fitbitjson_csv This data stream processes Fitbit data inside a JSON column obtained from the Fitbit API and stored in a CSV file. Read more about its column mappings and mutations in fitbitjson_csv . Key Description [FOLDER] Folder where you have to place a CSV file per Fitbit sensor. Each file has to contain all the data from every participant you want to process. 
[SLEEP_SUMMARY_LAST_NIGHT_END] Segments are assigned based on this parameter. Any sleep episodes that start between today\u2019s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow\u2019s LNE are regarded as today\u2019s sleep episodes. While today\u2019s bedtime is based on today\u2019s sleep episodes, today\u2019s wake time is based on yesterday\u2019s sleep episodes. fitbitparsed_mysql This data stream process Fitbit data stored in multiple columns after being parsed from the JSON column returned by Fitbit API and stored in a MySQL database. Read more about its column mappings and mutations in fitbitparsed_mysql . Key Description [DATABASE_GROUP] A database credentials group. Read the instructions below to set it up [SLEEP_SUMMARY_LAST_NIGHT_END] Segments are assigned based on this parameter. Any sleep episodes that start between today\u2019s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow\u2019s LNE are regarded as today\u2019s sleep episodes. While today\u2019s bedtime is based on today\u2019s sleep episodes, today\u2019s wake time is based on yesterday\u2019s sleep episodes. Setting up a DATABASE_GROUP and its connection credentials. If you haven\u2019t done so, create an empty file called credentials.yaml in your RAPIDS root directory: Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database): MY_GROUP : database : MY_DATABASE host : MY_HOST password : MY_PASSWORD port : 3306 user : MY_USER Notes The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use. Indentation matters You can have more than one credentials group in credentials.yaml Upgrading from ./.env from RAPIDS 0.x In RAPIDS versions 0.x, database credentials were stored in a ./.env file. 
If you are migrating from that type of file, you have two options: Migrate your credentials by hand: change .env format [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE to credentials.yaml format MY_GROUP : user : MY_USER password : MY_PASSWORD host : MY_HOST port : 3306 database : MY_DATABASE Use the migration script we provide (make sure your conda environment is active): python tools / update_format_env . py Connecting to localhost (host machine) from inside our docker container. If you are using RAPIDS\u2019 docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost . In a Linux host, you need to run our docker container using docker run --network=\"host\" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine. fitbitparsed_csv This data stream process Fitbit data stored in multiple columns (plain text) after being parsed from the JSON column returned by Fitbit API and stored in a CSV file. Read more about its column mappings and mutations in fitbitparsed_csv . Key Description [FOLDER] Folder where you have to place a CSV file per Fitbit sensor. Each file has to contain all the data from every participant you want to process. [SLEEP_SUMMARY_LAST_NIGHT_END] Segments are assigned based on this parameter. Any sleep episodes that start between today\u2019s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow\u2019s LNE are regarded as today\u2019s sleep episodes. While today\u2019s bedtime is based on today\u2019s sleep episodes, today\u2019s wake time is based on yesterday\u2019s sleep episodes. Empatica Set [USE] to the Empatica data stream you want to use; see the table in introduction to data streams . Configure any parameters as indicated below. 
EMPATICA_DATA_STREAMS : USE : empatica_zip # AVAILABLE: empatica_zip : FOLDER : data/external/empatica empatica_zip Key Description [FOLDER] The relative path to a folder containing one subfolder per participant. The name of a participant folder should match their device_id assigned in their participant file. Each participant folder can have one or more zip files with any name; in other words, the sensor data in those zip files belong to a single participant. The zip files are automatically generated by Empatica and have a CSV file per sensor ( ACC , HR , TEMP , EDA , BVP , TAGS ). All CSV files of the same type contained in one or more zip files are uncompressed, parsed, sorted by timestamp, and joined together. Example of an EMPATICA FOLDER In the file tree below, we want to process three participants\u2019 data: p01 , p02 , and p03 . p01 has two zip files, p02 has only one zip file, and p03 has three zip files. Each zip has a CSV file per sensor that is joined together and processed by RAPIDS. data/ # this folder exists in the root RAPIDS folder external/ empatica/ p01/ file1.zip file2.zip p02/ aaaa.zip p03/ t1.zip t2.zip t3.zip","title":"Data Stream Configuration"},{"location":"setup/configuration/#sensor-and-features-to-process","text":"Finally, you need to modify the config.yaml section of the sensors you want to extract behavioral features from. All sensors follow the same naming nomenclature ( DEVICE_SENSOR ) and parameter structure which we explain in the Behavioral Features Introduction . Done Head over to Execution to learn how to execute RAPIDS.","title":"Sensor and Features to Process"},{"location":"setup/execution/","text":"Execution \u00b6 After you have installed and configured RAPIDS, use the following command to execute it. 
./rapids -j1 Ready to extract behavioral features If you are ready to extract features head over to the Behavioral Features Introduction We wrap Snakemake The script ./rapids is a wrapper around Snakemake so you can pass any parameters that Snakemake accepts (e.g. -j1 ). Updating RAPIDS output after modifying config.yaml Any changes to the config.yaml file will be applied automatically and only the relevant files will be updated. This means that after modifying the features list for PHONE_MESSAGE for example, RAPIDS will execute the script that computes MESSAGES features and update its output file. Multi-core You can run RAPIDS over multiple cores by modifying the -j argument (e.g. use -j8 to use 8 cores). However , take into account that this means multiple sensor datasets for different participants will be loaded in memory at the same time. If RAPIDS crashes because it ran out of memory, reduce the number of cores and try again. As reference, we have run RAPIDS over 12 cores and 32 Gb of RAM without problems for a study with 200 participants with 14 days of low-frequency smartphone data (no accelerometer, gyroscope, or magnetometer). Deleting RAPIDS output If you want to delete all the output files RAPIDS produces, you can execute the following command: ./rapids -j1 --delete-all-output Forcing a complete rerun or updating your raw data in RAPIDS If you want to update your raw data or rerun the whole pipeline from scratch, run the following commands: ./rapids -j1 --delete-all-output ./rapids -j1","title":"Execution"},{"location":"setup/execution/#execution","text":"After you have installed and configured RAPIDS, use the following command to execute it. ./rapids -j1 Ready to extract behavioral features If you are ready to extract features head over to the Behavioral Features Introduction We wrap Snakemake The script ./rapids is a wrapper around Snakemake so you can pass any parameters that Snakemake accepts (e.g. -j1 ). 
Updating RAPIDS output after modifying config.yaml Any changes to the config.yaml file will be applied automatically and only the relevant files will be updated. This means that after modifying the features list for PHONE_MESSAGE for example, RAPIDS will execute the script that computes MESSAGES features and update its output file. Multi-core You can run RAPIDS over multiple cores by modifying the -j argument (e.g. use -j8 to use 8 cores). However , take into account that this means multiple sensor datasets for different participants will be loaded in memory at the same time. If RAPIDS crashes because it ran out of memory, reduce the number of cores and try again. As reference, we have run RAPIDS over 12 cores and 32 Gb of RAM without problems for a study with 200 participants with 14 days of low-frequency smartphone data (no accelerometer, gyroscope, or magnetometer). Deleting RAPIDS output If you want to delete all the output files RAPIDS produces, you can execute the following command: ./rapids -j1 --delete-all-output Forcing a complete rerun or updating your raw data in RAPIDS If you want to update your raw data or rerun the whole pipeline from scratch, run the following commands: ./rapids -j1 --delete-all-output ./rapids -j1","title":"Execution"},{"location":"setup/installation/","text":"Installation \u00b6 You can install RAPIDS using Docker (the fastest), or native instructions for MacOS and Linux (Ubuntu). Windows is supported through Docker or WSL. Docker Install Docker Pull our RAPIDS container docker pull moshiresearch/rapids:latest Run RAPIDS' container (after this step is done you should see a prompt in the main RAPIDS folder with its python environment active) docker run -it moshiresearch/rapids:latest Pull the latest version of RAPIDS git pull Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Optional . 
You can edit RAPIDS files with vim but we recommend using Visual Studio Code and its Remote Containers extension How to configure Remote Containers extension Make sure RAPIDS container is running Install the Remote - Containers extension Go to the Remote Explorer panel on the left hand sidebar On the top right dropdown menu choose Containers Double click on the moshiresearch/rapids container in the CONTAINERS tree A new VS Code session should open on RAPIDS main folder inside the container. Warning If you installed RAPIDS using Docker for Windows on Windows 10, the container will have limits on the amount of RAM it can use. If you find that RAPIDS crashes due to running out of memory, increase this limit. MacOS We tested these instructions in Catalina and Big Sur M1 Macs RAPIDS can run on M1 Macs, the only changes as of Feb 21, 2021 are: R needs to be installed via brew under Rosetta (x86 arch) due to incompatibility issues with some R libraries. To do this, run your terminal via Rosetta , then proceed with the usual brew installation command. Use x86 brew to install R and restore RAPIDS\u2019 packages ( snakemake -j1 renv_install & snakemake -j1 renv_restore ). There is a bug related to timezone codes. We set the correct TZ_DIR in renv/activate.R (line #19) Sys.setenv(\"TZDIR\" = file.path(R.home(), \"share\", \"zoneinfo\")) (RAPIDS does this automatically). Install brew Install MySQL brew install mysql brew services start mysql Install R 4.0, pandoc and rmarkdown. 
If you have other instances of R, we recommend uninstalling them brew install r brew install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install miniconda (restart your terminal afterwards) brew cask install miniconda conda init zsh # (or conda init bash) Clone our repo git clone https://github.com/carissalow/rapids Create a python virtual environment cd rapids conda env create -f environment.yml -n rapids conda activate rapids Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Ubuntu We tested RAPIDS on Ubuntu 18.04 & 20.04. Note that the necessary Python and R packages are available in other Linux distributions, so if you decide to give it a try, let us know and we can update these docs. Install dependencies sudo apt install libcurl4-openssl-dev sudo apt install libssl-dev sudo apt install libxml2-dev sudo apt install libglpk40 Install MySQL sudo apt install libmysqlclient-dev sudo apt install mysql-server Add key for R\u2019s repository. sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 Add R\u2019s repository Ubuntu 18.04 Bionic sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/' Ubuntu 20.04 Focal sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' Install R 4.0. 
If you have other instances of R, we recommend uninstalling them sudo apt update sudo apt install r-base Install Pandoc and rmarkdown sudo apt install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install git sudo apt install git Install miniconda Restart your current shell Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install the R virtual environment management package (renv) snakemake -j1 renv_install Restore the R virtual environment Ubuntu 18.04 Bionic (fast) Run the following command to restore the R virtual environment using RSPM binaries R -e 'renv::restore(repos = c(CRAN = \"https://packagemanager.rstudio.com/all/__linux__/bionic/latest\"))' Ubuntu 20.04 Focal (fast) Run the following command to restore the R virtual environment using RSPM binaries R -e 'renv::restore(repos = c(CRAN = \"https://packagemanager.rstudio.com/all/__linux__/focal/latest\"))' Ubuntu (slow) If the fast installation command failed for some reason, you can restore the R virtual environment from source: R -e 'renv::restore()' Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Windows There are several options varying in complexity: You can use our Docker instructions (tested) You can use our Ubuntu 20.04 instructions on WSL2 (not tested but it will likely work) Native installation (experimental). If you would like to contribute to RAPIDS you could try to install MySQL, miniconda, Python, and R 4.0+ in Windows and restore the Python and R virtual environments using steps 6 and 7 of the instructions for Mac. 
You can get in touch if you would like to discuss this with the team.","title":"Installation"},{"location":"setup/installation/#installation","text":"You can install RAPIDS using Docker (the fastest), or native instructions for MacOS and Linux (Ubuntu). Windows is supported through Docker or WSL. Docker Install Docker Pull our RAPIDS container docker pull moshiresearch/rapids:latest Run RAPIDS' container (after this step is done you should see a prompt in the main RAPIDS folder with its python environment active) docker run -it moshiresearch/rapids:latest Pull the latest version of RAPIDS git pull Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Optional . You can edit RAPIDS files with vim but we recommend using Visual Studio Code and its Remote Containers extension How to configure Remote Containers extension Make sure RAPIDS container is running Install the Remote - Containers extension Go to the Remote Explorer panel on the left hand sidebar On the top right dropdown menu choose Containers Double click on the moshiresearch/rapids container in the CONTAINERS tree A new VS Code session should open on RAPIDS main folder inside the container. Warning If you installed RAPIDS using Docker for Windows on Windows 10, the container will have limits on the amount of RAM it can use. If you find that RAPIDS crashes due to running out of memory, increase this limit. MacOS We tested these instructions in Catalina and Big Sur M1 Macs RAPIDS can run on M1 Macs, the only changes as of Feb 21, 2021 are: R needs to be installed via brew under Rosetta (x86 arch) due to incompatibility issues with some R libraries. To do this, run your terminal via Rosetta , then proceed with the usual brew installation command. Use x86 brew to install R and restore RAPIDS\u2019 packages ( snakemake -j1 renv_install & snakemake -j1 renv_restore ). There is a bug related to timezone codes. 
We set the correct TZ_DIR in renv/activate.R (line #19) Sys.setenv(\"TZDIR\" = file.path(R.home(), \"share\", \"zoneinfo\")) (RAPIDS does this automatically). Install brew Install MySQL brew install mysql brew services start mysql Install R 4.0, pandoc and rmarkdown. If you have other instances of R, we recommend uninstalling them brew install r brew install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install miniconda (restart your terminal afterwards) brew cask install miniconda conda init zsh # (or conda init bash) Clone our repo git clone https://github.com/carissalow/rapids Create a python virtual environment cd rapids conda env create -f environment.yml -n rapids conda activate rapids Install R packages and virtual environment: snakemake -j1 renv_install snakemake -j1 renv_restore Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Ubuntu We tested RAPIDS on Ubuntu 18.04 & 20.04. Note that the necessary Python and R packages are available in other Linux distributions, so if you decide to give it a try, let us know and we can update these docs. Install dependencies sudo apt install libcurl4-openssl-dev sudo apt install libssl-dev sudo apt install libxml2-dev sudo apt install libglpk40 Install MySQL sudo apt install libmysqlclient-dev sudo apt install mysql-server Add key for R\u2019s repository. sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9 Add R\u2019s repository Ubuntu 18.04 Bionic sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/' Ubuntu 20.04 Focal sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' Install R 4.0. 
If you have other instances of R, we recommend uninstalling them sudo apt update sudo apt install r-base Install Pandoc and rmarkdown sudo apt install pandoc Rscript --vanilla -e 'install.packages(\"rmarkdown\", repos=\"http://cran.us.r-project.org\")' Install git sudo apt install git Install miniconda Restart your current shell Clone our repo: git clone https://github.com/carissalow/rapids Create a python virtual environment: cd rapids conda env create -f environment.yml -n MY_ENV_NAME conda activate MY_ENV_NAME Install the R virtual environment management package (renv) snakemake -j1 renv_install Restore the R virtual environment Ubuntu 18.04 Bionic (fast) Run the following command to restore the R virtual environment using RSPM binaries R -e 'renv::restore(repos = c(CRAN = \"https://packagemanager.rstudio.com/all/__linux__/bionic/latest\"))' Ubuntu 20.04 Focal (fast) Run the following command to restore the R virtual environment using RSPM binaries R -e 'renv::restore(repos = c(CRAN = \"https://packagemanager.rstudio.com/all/__linux__/focal/latest\"))' Ubuntu (slow) If the fast installation command failed for some reason, you can restore the R virtual environment from source: R -e 'renv::restore()' Note This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion. Make RAPIDS script executable chmod +x rapids Check that RAPIDS is working ./rapids -j1 Windows There are several options varying in complexity: You can use our Docker instructions (tested) You can use our Ubuntu 20.04 instructions on WSL2 (not tested but it will likely work) Native installation (experimental). If you would like to contribute to RAPIDS you could try to install MySQL, miniconda, Python, and R 4.0+ in Windows and restore the Python and R virtual environments using steps 6 and 7 of the instructions for Mac. 
You can get in touch if you would like to discuss this with the team.","title":"Installation"},{"location":"setup/overview/","text":"Overview \u00b6 Let\u2019s review some key concepts we use throughout these docs: Definition Description Device A mobile or wearable device, like smartphones, Fitbit wrist bands, Oura Rings, etc. Sensor A physical or digital module builtin in a device that produces a data stream. For example, a smartphone\u2019s accelerometer or screen. Data Stream Set of sensor data collected using a specific device with a particular ** format** and stored in a specific container . For example, smartphone (device) data collected with the AWARE Framework (format) and stored in a MySQL database (container). Data Stream Format Sensor data produced by a data stream have columns with specific names and types. RAPIDS can process a data stream using a format.yaml file that describes the raw data columns and any necessary transformations. Data Stream Container Sensor data produced by a data stream can be stored in a database, electronic files, or arbitrary electronic containers. RAPIDS can pull (download) the data from a stream using a container script implemented in R or Python. Participant A person that took part in a monitoring study Behavioral feature A metric computed from raw sensor data quantifying the behavior of a participant. For example, time spent at home calculated from location data. These are also known as digital biomarkers Time segment Time segments (or epochs) are the time windows on which RAPIDS extracts behavioral features. For example, you might want to compute participants\u2019 time at home every morning or only during weekends. You define time segments in a CSV file that RAPIDS processes. Time zone A string like America/New_York that represents a time zone where a device logged data. You can process data collected in single or multiple time zones for every participant. 
Feature Provider A script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, which are named after its first author like [PHONE_LOCATIONS][DORYAB] . config.yaml A YAML file where you can modify parameters to process data streams and behavioral features. This is the heart of RAPIDS and the file that you will change the most. credentials.yaml A YAML file where you can define credential groups (user, password, host, etc.) if your data stream needs to connect to a database or Web API Participant file(s) A YAML file that links one or more smartphone or wearable devices used by a single participant. RAPIDS needs one file per participant. What can I do with RAPIDS? Extract behavioral features from smartphone, Fitbit, and Empatica\u2019s supported data streams Add your own behavioral features (we can include them in RAPIDS if you want to share them with the community) Add support for new data streams if yours cannot be processed by RAPIDS yet Create visualizations for data quality control and feature inspection Extending RAPIDS to organize your analysis and publish a code repository along with your code Hint We recommend you follow the Minimal Example tutorial to get familiar with RAPIDS In order to follow any of the previous tutorials, you will have to Install , Configure , and learn how to Execute RAPIDS. Open a new discussion in Github if you have any questions and open an issue to report any bugs. Frequently Asked Questions \u00b6 General \u00b6 What exactly is RAPIDS? RAPIDS is a group of configuration files and R and Python scripts executed by Snakemake . You can get a copy of RAPIDS by cloning our Github repository. RAPIDS is not a web application or server; all the processing is done in your laptop, server, or computer cluster. How does RAPIDS work? 
You will most of the time only have to modify configuration files in YAML format ( config.yaml , credentials.yaml , and participant files pxx.yaml ), and in CSV format (time zones and time segments). RAPIDS pulls data from different data containers and processes it in steps. The input/output of each stage is saved as a CSV file for inspection; you can check the files created for each sensor on its documentation page. All data is stored in data/ , and all processing Python and R scripts are stored in src/ . User and File interactions in RAPIDS In the figure below, we represent the interactions between users and files. After a user modifies the configuration files mentioned above, the Snakefile file will search for and execute the Snakemake rules that contain the Python or R scripts necessary to generate or update the required output files (behavioral features, plots, etc.). Interaction diagram between the user, and important files in RAPIDS Data flow in RAPIDS In the figure below, we represent the flow of data in RAPIDS. In broad terms, smartphone and wearable devices log data streams with a certain format to a data container (database, file, etc.). RAPIDS can connect to these containers if it has a format.yaml and a container.[R|py] script used to pull the correct data and mutate it to comply with RAPIDS\u2019 internal data representation. Once the data stream is in RAPIDS, it goes through some basic transformations (scripts), one that assigns a time segment and a time zone to each data row, and another one that creates \u201cepisodes\u201d of data for some sensors that need it (like screen, battery, activity recognition, and sleep intraday data). After this, RAPIDS executes the requested PROVIDER script that computes behavioral features per time segment instance. After every feature is computed, they are joined per sensor, per participant, and study. Visualizations are built based on raw data or based on calculated features. 
Data stream flow in RAPIDS Is my data private? Absolutely, you are processing your data with your own copy of RAPIDS in your laptop, server, or computer cluster, so neither we nor anyone else can access your datasets. Do I need to have coding skills to use RAPIDS? If you want to extract the behavioral features or visualizations that RAPIDS offers out of the box, the answer is no. However, you need to be comfortable running commands in your terminal and familiar with editing YAML files and CSV files. If you want to add support for new data streams or behavioral features, you need to be familiar with R or Python. Is RAPIDS open-source or free? Yes, RAPIDS is both open-source and free. How do I cite RAPIDS? Please refer to our Citation guide ; depending on what parts of RAPIDS you used, we also ask you to cite the work of other authors that shared their work. I have a lot of data, can RAPIDS handle it/ is RAPIDS fast enough? Yes, we use Snakemake under the hood, so you can automatically distribute RAPIDS execution over multiple cores or clusters . RAPIDS processes data per sensor and participant, so it can take advantage of this parallel processing. What are the advantages of using RAPIDS over implementing my own analysis code? We believe RAPIDS can benefit your analysis in several ways: RAPIDS has more than 250 behavioral features available, many of them tested and used by other researchers. RAPIDS can extract features in dynamic time segments (for example, every x minutes, x hours, x days, x weeks, x months, etc.). This is handy because you don\u2019t have to deal with time zones, daylight saving changes, or date arithmetic. Your analysis is less prone to errors. Every participant sensor dataset is analyzed in the same way and isolated from each other. If you have lots of data, out-of-the-box parallel execution will speed up your analysis, and if your computer crashes, RAPIDS will start from where it left off. 
You can publish your analysis code along with your papers and be sure it will run exactly as it does on your computer. You can still add your own behavioral features and data streams if you need to, and the community will be able to reuse your work. Data Streams \u00b6 Can I process smartphone data collected with Beiwe, PurpleRobot, or app X? Yes, but you need to add a new data stream to RAPIDS (a new format.yaml and container script in R or Python). Follow this tutorial . Open a new discussion in Github if you have any questions. If you do so, let us know so we can integrate your work into RAPIDS. Can I process data from Oura Rings, Actigraphs, or wearable X? The only wearables we support at the moment are Empatica and Fitbit. However, get in touch if you need to process data from a different wearable. We have limited resources, so we add support for additional devices on an as-needed basis, but we would be happy to collaborate. Open a new discussion in Github if you have any questions. Can I process smartphone or wearable data stored in PostgreSQL, Oracle, SQLite, CSV files, or data container X? Yes, but you need to add a new data stream to RAPIDS (a new format.yaml and container script in R or Python). Follow this tutorial . If you are processing data streams we already support like AWARE, Fitbit, or Empatica and are just connecting to a different container, you can reuse their format.yaml and only implement a new container script. Open a new discussion in Github if you have any questions. If you do so, let us know so we can integrate your work into RAPIDS. I have participants that live in different time zones and some that travel; can RAPIDS handle this? Yes, RAPIDS can handle single or multiple timezones per participant. You can use time zone data collected by smartphones or collected by hand. Some of my participants used more than one device during my study; can RAPIDS handle this? 
Yes, you can link more than one smartphone or wearable device to a single participant. RAPIDS will merge them and sort them automatically. Some of my participants switched from Android to iOS or vice-versa during my study; can RAPIDS handle this? Yes, data from multiple smartphones can be linked to a single participant. All iOS data is converted to Android data before merging it. Extending RAPIDS \u00b6 Can I add my own behavioral features/digital biomarkers? Yes, you can implement your own features in R or Python following this tutorial Can I extract behavioral features based on two or more sensors? Yes, we do this for PHONE_DATA_YIELD (combines all phone sensors), PHONE_LOCATIONS (combines location and data yield data), PHONE_APPLICATIONS_BACKGROUND (combines screen and app usage data), and FITBIT_INTRADAY_STEPS (combines Fitbit and sleep and step data). However, we haven\u2019t come up with a user-friendly way to configure this, and currently, we join sensors on a case-by-case basis. This is mainly because not enough users have needed this functionality so far. Get in touch, and we can set it up together; the more use cases we are aware of, the easier it will be to integrate this into RAPIDS. I know how to program in Python or R but not both. Can I still use or extend RAPIDS? Yes, you don\u2019t need to write any code to use RAPIDS out of the box. If you need to add support for new data streams or behavioral features you can use scripts in either language. I have scripts that clean raw data from X sensor, can I use them with RAPIDS? Yes, you can add them as a [MUTATION][SCRIPT] in the format.yaml of the data stream you are using. 
You will add a main function that will receive a data frame with the raw data for that sensor that, in turn, will be used to compute behavioral features.","title":"Overview"},{"location":"setup/overview/#overview","text":"Let\u2019s review some key concepts we use throughout these docs: Definition Description Device A mobile or wearable device, like smartphones, Fitbit wrist bands, Oura Rings, etc. Sensor A physical or digital module builtin in a device that produces a data stream. For example, a smartphone\u2019s accelerometer or screen. Data Stream Set of sensor data collected using a specific device with a particular ** format** and stored in a specific container . For example, smartphone (device) data collected with the AWARE Framework (format) and stored in a MySQL database (container). Data Stream Format Sensor data produced by a data stream have columns with specific names and types. RAPIDS can process a data stream using a format.yaml file that describes the raw data columns and any necessary transformations. Data Stream Container Sensor data produced by a data stream can be stored in a database, electronic files, or arbitrary electronic containers. RAPIDS can pull (download) the data from a stream using a container script implemented in R or Python. Participant A person that took part in a monitoring study Behavioral feature A metric computed from raw sensor data quantifying the behavior of a participant. For example, time spent at home calculated from location data. These are also known as digital biomarkers Time segment Time segments (or epochs) are the time windows on which RAPIDS extracts behavioral features. For example, you might want to compute participants\u2019 time at home every morning or only during weekends. You define time segments in a CSV file that RAPIDS processes. Time zone A string like America/New_York that represents a time zone where a device logged data. 
You can process data collected in single or multiple time zones for every participant. Feature Provider A script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, which are named after its first author like [PHONE_LOCATIONS][DORYAB] . config.yaml A YAML file where you can modify parameters to process data streams and behavioral features. This is the heart of RAPIDS and the file that you will change the most. credentials.yaml A YAML file where you can define credential groups (user, password, host, etc.) if your data stream needs to connect to a database or Web API Participant file(s) A YAML file that links one or more smartphone or wearable devices used by a single participant. RAPIDS needs one file per participant. What can I do with RAPIDS? Extract behavioral features from smartphone, Fitbit, and Empatica\u2019s supported data streams Add your own behavioral features (we can include them in RAPIDS if you want to share them with the community) Add support for new data streams if yours cannot be processed by RAPIDS yet Create visualizations for data quality control and feature inspection Extending RAPIDS to organize your analysis and publish a code repository along with your code Hint We recommend you follow the Minimal Example tutorial to get familiar with RAPIDS In order to follow any of the previous tutorials, you will have to Install , Configure , and learn how to Execute RAPIDS. Open a new discussion in Github if you have any questions and open an issue to report any bugs.","title":"Overview"},{"location":"setup/overview/#frequently-asked-questions","text":"","title":"Frequently Asked Questions"},{"location":"setup/overview/#general","text":"What exactly is RAPIDS? RAPIDS is a group of configuration files and R and Python scripts executed by Snakemake . You can get a copy of RAPIDS by cloning our Github repository. 
RAPIDS is not a web application or server; all the processing is done in your laptop, server, or computer cluster. How does RAPIDS work? You will most of the time only have to modify configuration files in YAML format ( config.yaml , credentials.yaml , and participant files pxx.yaml ), and in CSV format (time zones and time segments). RAPIDS pulls data from different data containers and processes it in steps. The input/output of each stage is saved as a CSV file for inspection; you can check the files created for each sensor on its documentation page. All data is stored in data/ , and all processing Python and R scripts are stored in src/ . User and File interactions in RAPIDS In the figure below, we represent the interactions between users and files. After a user modifies the configuration files mentioned above, the Snakefile file will search for and execute the Snakemake rules that contain the Python or R scripts necessary to generate or update the required output files (behavioral features, plots, etc.). Interaction diagram between the user, and important files in RAPIDS Data flow in RAPIDS In the figure below, we represent the flow of data in RAPIDS. In broad terms, smartphone and wearable devices log data streams with a certain format to a data container (database, file, etc.). RAPIDS can connect to these containers if it has a format.yaml and a container.[R|py] script used to pull the correct data and mutate it to comply with RAPIDS\u2019 internal data representation. Once the data stream is in RAPIDS, it goes through some basic transformations (scripts), one that assigns a time segment and a time zone to each data row, and another one that creates \u201cepisodes\u201d of data for some sensors that need it (like screen, battery, activity recognition, and sleep intraday data). After this, RAPIDS executes the requested PROVIDER script that computes behavioral features per time segment instance. 
After every feature is computed, they are joined per sensor, per participant, and study. Visualizations are built based on raw data or based on calculated features. Data stream flow in RAPIDS Is my data private? Absolutely, you are processing your data with your own copy of RAPIDS in your laptop, server, or computer cluster, so neither we nor anyone else can access your datasets. Do I need to have coding skills to use RAPIDS? If you want to extract the behavioral features or visualizations that RAPIDS offers out of the box, the answer is no. However, you need to be comfortable running commands in your terminal and familiar with editing YAML files and CSV files. If you want to add support for new data streams or behavioral features, you need to be familiar with R or Python. Is RAPIDS open-source or free? Yes, RAPIDS is both open-source and free. How do I cite RAPIDS? Please refer to our Citation guide ; depending on what parts of RAPIDS you used, we also ask you to cite the work of other authors that shared their work. I have a lot of data, can RAPIDS handle it/ is RAPIDS fast enough? Yes, we use Snakemake under the hood, so you can automatically distribute RAPIDS execution over multiple cores or clusters . RAPIDS processes data per sensor and participant, so it can take advantage of this parallel processing. What are the advantages of using RAPIDS over implementing my own analysis code? We believe RAPIDS can benefit your analysis in several ways: RAPIDS has more than 250 behavioral features available, many of them tested and used by other researchers. RAPIDS can extract features in dynamic time segments (for example, every x minutes, x hours, x days, x weeks, x months, etc.). This is handy because you don\u2019t have to deal with time zones, daylight saving changes, or date arithmetic. Your analysis is less prone to errors. Every participant sensor dataset is analyzed in the same way and isolated from each other. 
If you have lots of data, out-of-the-box parallel execution will speed up your analysis, and if your computer crashes, RAPIDS will start from where it left off. You can publish your analysis code along with your papers and be sure it will run exactly as it does on your computer. You can still add your own behavioral features and data streams if you need to, and the community will be able to reuse your work.","title":"General"},{"location":"setup/overview/#data-streams","text":"Can I process smartphone data collected with Beiwe, PurpleRobot, or app X? Yes, but you need to add a new data stream to RAPIDS (a new format.yaml and container script in R or Python). Follow this tutorial . Open a new discussion in Github if you have any questions. If you do so, let us know so we can integrate your work into RAPIDS. Can I process data from Oura Rings, Actigraphs, or wearable X? The only wearables we support at the moment are Empatica and Fitbit. However, get in touch if you need to process data from a different wearable. We have limited resources, so we add support for additional devices on an as-needed basis, but we would be happy to collaborate. Open a new discussion in Github if you have any questions. Can I process smartphone or wearable data stored in PostgreSQL, Oracle, SQLite, CSV files, or data container X? Yes, but you need to add a new data stream to RAPIDS (a new format.yaml and container script in R or Python). Follow this tutorial . If you are processing data streams we already support like AWARE, Fitbit, or Empatica and are just connecting to a different container, you can reuse their format.yaml and only implement a new container script. Open a new discussion in Github if you have any questions. If you do so, let us know so we can integrate your work into RAPIDS. I have participants that live in different time zones and some that travel; can RAPIDS handle this? Yes, RAPIDS can handle single or multiple timezones per participant. 
You can use time zone data collected by smartphones or collected by hand. Some of my participants used more than one device during my study; can RAPIDS handle this? Yes, you can link more than one smartphone or wearable device to a single participant. RAPIDS will merge them and sort them automatically. Some of my participants switched from Android to iOS or vice-versa during my study; can RAPIDS handle this? Yes, data from multiple smartphones can be linked to a single participant. All iOS data is converted to Android data before merging it.","title":"Data Streams"},{"location":"setup/overview/#extending-rapids","text":"Can I add my own behavioral features/digital biomarkers? Yes, you can implement your own features in R or Python following this tutorial Can I extract behavioral features based on two or more sensors? Yes, we do this for PHONE_DATA_YIELD (combines all phone sensors), PHONE_LOCATIONS (combines location and data yield data), PHONE_APPLICATIONS_BACKGROUND (combines screen and app usage data), and FITBIT_INTRADAY_STEPS (combines Fitbit and sleep and step data). However, we haven\u2019t come up with a user-friendly way to configure this, and currently, we join sensors on a case-by-case basis. This is mainly because not enough users have needed this functionality so far. Get in touch, and we can set it up together; the more use cases we are aware of, the easier it will be to integrate this into RAPIDS. I know how to program in Python or R but not both. Can I still use or extend RAPIDS? Yes, you don\u2019t need to write any code to use RAPIDS out of the box. If you need to add support for new data streams or behavioral features you can use scripts in either language. I have scripts that clean raw data from X sensor, can I use them with RAPIDS? Yes, you can add them as a [MUTATION][SCRIPT] in the format.yaml of the data stream you are using. 
You will add a main function that will receive a data frame with the raw data for that sensor that, in turn, will be used to compute behavioral features.","title":"Extending RAPIDS"},{"location":"snippets/aware_format/","text":"If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn\u2019t need to modify this format (described below). Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs . The yaml file that describes the format of this data stream is at: src/data/streams/aware_csv/format.yaml For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping for learn how this works. Hint The mappings in this stream (RAPIDS/Stream) are the same names because AWARE data was the first stream RAPIDS supported, meaning that it considers AWARE column names the default. PHONE_ACCELEROMETER ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_VALUES_0 double_values_0 DOUBLE_VALUES_1 double_values_1 DOUBLE_VALUES_2 double_values_2 MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_ACTIVITY_RECOGNITION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME activity_name ACTIVITY_TYPE activity_type CONFIDENCE confidence MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id ACTIVITY_NAME FLAG_TO_MUTATE ACTIVITY_TYPE FLAG_TO_MUTATE CONFIDENCE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column ACTIVITIES activities CONFIDENCE confidence SCRIPTS src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R Note For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE : if stream\u2019s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0 if stream\u2019s activities field 
is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1 if stream\u2019s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7 if stream\u2019s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8 if stream\u2019s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3 if stream\u2019s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4 For RAPIDS CONFIDENCE column: if stream\u2019s confidence field is 0, set CONFIDENCE = 0 if stream\u2019s confidence field is 1, set CONFIDENCE = 50 if stream\u2019s confidence field is 2, set CONFIDENCE = 100 PHONE_APPLICATIONS_CRASHES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name APPLICATION_VERSION application_version ERROR_SHORT error_short ERROR_LONG error_long ERROR_CONDITION error_condition IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_FOREGROUND ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name IS_SYSTEM_APP is_system_app MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_APPLICATIONS_NOTIFICATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name APPLICATION_NAME application_name TEXT text SOUND sound VIBRATE vibrate DEFAULTS defaults FLAGS flags MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_BATTERY ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS battery_status BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Client V1 RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BATTERY_STATUS FLAG_TO_MUTATE BATTERY_LEVEL battery_level BATTERY_SCALE battery_scale MUTATION COLUMN_MAPPINGS Script column Stream column BATTERY_STATUS battery_status SCRIPTS src/data/streams/mutations/phone/aware/battery_ios_unification.R Note For RAPIDS BATTERY_STATUS column: if stream\u2019s battery_status field is 3, set BATTERY_STATUS = 5 (full status) if stream\u2019s battery_status field is 1, set BATTERY_STATUS = 3 (discharge) IOS Client V2 Same as ANDROID PHONE_BLUETOOTH ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id BT_ADDRESS bt_address BT_NAME bt_name BT_RSSI bt_rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android). PHONE_CALLS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE call_type CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id CALL_TYPE FLAG_TO_MUTATE CALL_DURATION call_duration TRACE trace MUTATION COLUMN_MAPPINGS Script column Stream column CALL_TYPE call_type SCRIPTS src/data/streams/mutations/phone/aware/calls_ios_unification.R Note We transform iOS call logs into Android\u2019s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android\u2019s events: 1=incoming, 2=outgoing, 3=missed. 
We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below): Search for the disconnected (4) status as it is common to all calls Group all events that preceded every status 4 We convert every 1,2,4 (or 2,1,4) sequence to an incoming call We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing) We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up) Tested with an Android (OnePlus 7T) and an iPhone XR Call type Android (duration) iOS (duration) New Rule Outgoing missed ended by me 2 (0) 3,4 (0,X) 3,4 is converted to 2 with duration 0 Outgoing missed ended by them 2(0) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2* Incoming missed ended by me NA** 1,4 (0,X) 1,4 is converted to 3 with duration 0 Incoming missed ended by them 3(0) 1,4 (0,X) 1,4 is converted to 3 with duration 0 Outgoing answered 2(X excluding dialing time) 3,2,4 (0,X,X2) 3,2,4 is converted to 2 with duration X2 Incoming answered 1(X excluding dialing time) 1,2,4 (0,X,X2) 1,2,4 is converted to 1 with duration X2 .* There is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered). .** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant. 
PHONE_CONVERSATION ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_ENERGY double_energy INFERENCE inference DOUBLE_CONVO_START FLAG_TO_MUTATE DOUBLE_CONVO_END FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column DOUBLE_CONVO_START double_convo_start DOUBLE_CONVO_END double_convo_end SCRIPTS src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R Note For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END : if stream\u2019s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start . if stream\u2019s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end . PHONE_KEYBOARD ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id PACKAGE_NAME package_name BEFORE_TEXT before_text CURRENT_TEXT current_text IS_PASSWORD is_password MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_LIGHT ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LIGHT_LUX double_light_lux ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. 
PHONE_LOCATIONS ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id DOUBLE_LATITUDE double_latitude DOUBLE_LONGITUDE double_longitude DOUBLE_BEARING double_bearing DOUBLE_SPEED double_speed DOUBLE_ALTITUDE double_altitude PROVIDER provider ACCURACY accuracy MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_LOG ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id LOG_MESSAGE log_message MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_MESSAGES ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MESSAGE_TYPE message_type TRACE trace MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS This sensor is not supported by iOS devices. PHONE_SCREEN ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS screen_status MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SCREEN_STATUS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column SCREEN_STATUS screen_status SCRIPTS src/data/streams/mutations/phone/aware/screen_ios_unification.R Note For SCREEN_STATUS RAPIDS column: if stream\u2019s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode). 
PHONE_WIFI_CONNECTED ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id MAC_ADDRESS mac_address SSID ssid BSSID bssid MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Same as ANDROID PHONE_WIFI_VISIBLE ANDROID RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP timestamp DEVICE_ID device_id SSID ssid BSSID bssid SECURITY security FREQUENCY frequency RSSI rssi MUTATION COLUMN_MAPPINGS (None) SCRIPTS (None) IOS Only old iOS versions supported this sensor (same mapping as Android).","title":"Aware format"},{"location":"snippets/database/","text":"Setting up a DATABASE_GROUP and its connection credentials. If you haven\u2019t done so, create an empty file called credentials.yaml in your RAPIDS root directory: Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database): MY_GROUP : database : MY_DATABASE host : MY_HOST password : MY_PASSWORD port : 3306 user : MY_USER Notes The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use. Indentation matters You can have more than one credentials group in credentials.yaml Upgrading from ./.env from RAPIDS 0.x In RAPIDS versions 0.x, database credentials were stored in a ./.env file. If you are migrating from that type of file, you have two options: Migrate your credentials by hand: change .env format [ MY_GROUP ] user=MY_USER password=MY_PASSWORD host=MY_HOST port=3306 database=MY_DATABASE to credentials.yaml format MY_GROUP : user : MY_USER password : MY_PASSWORD host : MY_HOST port : 3306 database : MY_DATABASE Use the migration script we provide (make sure your conda environment is active): python tools / update_format_env . py Connecting to localhost (host machine) from inside our docker container. 
If you are using RAPIDS\u2019 docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost . In a Linux host, you need to run our docker container using docker run --network=\"host\" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine.","title":"Database"},{"location":"snippets/feature_introduction_example/","text":"Sensor section Each sensor (accelerometer, screen, etc.) of every supported device (smartphone, Fitbit, etc.) has a section in the config.yaml with parameters and feature PROVIDERS . Sensor Parameters. Each sensor section has one or more parameters. These are parameters that affect different aspects of how the raw data is pulled, and processed. The CONTAINER parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS] . We explain these parameters in a table at the top of each sensor documentation page. Sensor Providers Each object in this list represents a feature PROVIDER . Each sensor can have zero, one, or more providers. A PROVIDER is a script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, which are named after its first author like [PHONE_LOCATIONS][DORYAB] . In this example, there are two accelerometer feature providers RAPIDS and PANDA . PROVIDER Parameters Each PROVIDER has parameters that affect the computation of the behavioral features it offers. These parameters include at least a [COMPUTE] flag that you switch to True to extract a provider\u2019s behavioral features. We explain every provider\u2019s parameter in a table under the Parameters description heading on each provider documentation page. PROVIDER Features Each PROVIDER offers a set of behavioral features. These features are grouped in an array for some providers, like those for RAPIDS provider. 
For others, they are grouped in a collection of arrays, like those for PANDAS provider. In either case, you can delete the features you are not interested in, and they will not be included in the sensor\u2019s output feature file. We explain each behavioral feature in a table under the Features description heading on each provider documentation page. PROVIDER script Each PROVIDER has a SRC_SCRIPT that points to the script implementing its behavioral features. It has to be a relative path from RAPIDS\u2019 root folder and the script\u2019s parent folder should be named after the provider, e.g. panda .","title":"Feature introduction example"},{"location":"snippets/jsonfitbit_format/","text":"The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . This file is at: src/data/streams/fitbitjson_csv/format.yaml If you want RAPIDS to process Fitbit sensor data using this stream, you will need to map DEVICE_ID and JSON_FITBIT_COLUMN to your own raw data columns inside each sensor section in format.yaml . FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESOUTOFRANGE FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESFATBURN FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESCARDIO FLAG_TO_MUTATE HEARTRATE_DAILY_CALORIESPEAK FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_heartrate_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id HEARTRATE FLAG_TO_MUTATE HEARTRATE_ZONE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - 
src/data/streams/mutations/fitbit/parse_heartrate_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the raw data RAPIDS expects for this data stream device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1200.6102,\u201dmax\u201d:88,\u201dmin\u201d:31,\u201dminutes\u201d:1058,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:760.3020,\u201dmax\u201d:120,\u201dmin\u201d:86,\u201dminutes\u201d:366,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:15.2048,\u201dmax\u201d:146,\u201dmin\u201d:120,\u201dminutes\u201d:2,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:72}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:68},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:67},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:67},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:1100.1120,\u201dmax\u201d:89,\u201dmin\u201d:30,\u201dminutes\u201d:921,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:660.0012,\u201dmax\u201d:118,\u201dmin\u201d:82,\u201dminutes\u201d:361,\u201dname\u201d:\u201dFat 
Burn\u201d},{\u201ccaloriesOut\u201d:23.7088,\u201dmax\u201d:142,\u201dmin\u201d:108,\u201dminutes\u201d:3,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:221,\u201dmin\u201d:148,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:70}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:77},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:75},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:73},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201cactivities-heart\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:{\u201ccustomHeartRateZones\u201d:[],\u201dheartRateZones\u201d:[{\u201ccaloriesOut\u201d:750.3615,\u201dmax\u201d:77,\u201dmin\u201d:30,\u201dminutes\u201d:851,\u201dname\u201d:\u201dOut of Range\u201d},{\u201ccaloriesOut\u201d:734.1516,\u201dmax\u201d:107,\u201dmin\u201d:77,\u201dminutes\u201d:550,\u201dname\u201d:\u201dFat Burn\u201d},{\u201ccaloriesOut\u201d:131.8579,\u201dmax\u201d:130,\u201dmin\u201d:107,\u201dminutes\u201d:29,\u201dname\u201d:\u201dCardio\u201d},{\u201ccaloriesOut\u201d:0,\u201dmax\u201d:220,\u201dmin\u201d:130,\u201dminutes\u201d:0,\u201dname\u201d:\u201dPeak\u201d}],\u201drestingHeartRate\u201d:69}}],\u201dactivities-heart-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:90},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:89},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:88},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME FLAG_TO_MUTATE LOCAL_END_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id EFFICIENCY FLAG_TO_MUTATE MINUTES_AFTER_WAKEUP 
FLAG_TO_MUTATE MINUTES_ASLEEP FLAG_TO_MUTATE MINUTES_AWAKE FLAG_TO_MUTATE MINUTES_TO_FALL_ASLEEP FLAG_TO_MUTATE MINUTES_IN_BED FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_summary_json.py - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], 
\u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE DEVICE_ID device_id TYPE_EPISODE_ID FLAG_TO_MUTATE DURATION FLAG_TO_MUTATE IS_MAIN_SLEEP FLAG_TO_MUTATE TYPE FLAG_TO_MUTATE LEVEL FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_sleep_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note Fitbit API has two versions for sleep data, v1 and v1.2, we support both. All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:3600000,\u201defficiency\u201d:92,\u201dendTime\u201d:\u201d2020-10-10T16:37:00.000\u201d,\u201dinfoCode\u201d:2,\u201disMainSleep\u201d:false,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-10T15:37:30.000\u201d,\u201dlevel\u201d:\u201dasleep\u201d,\u201dseconds\u201d:660},{\u201cdateTime\u201d:\u201d2020-10-10T15:48:30.000\u201d,\u201dlevel\u201d:\u201drestless\u201d,\u201dseconds\u201d:60},\u2026], \u201csummary\u201d:{\u201casleep\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:56},\u201dawake\u201d:{\u201ccount\u201d:0,\u201dminutes\u201d:0},\u201drestless\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:4}}},\u201dlogId\u201d:26315914306,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:55,\u201dminutesAwake\u201d:5,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T15:36:30.000\u201d,\u201dtimeInBed\u201d:60,\u201dtype\u201d:\u201dclassic\u201d},{\u201cdateOfSleep\u201d:\u201d2020-10-10\u201d,\u201dduration\u201d:22980000,\u201defficiency\u201d:88,\u201dendTime\u201d:\u201d2020-10-10T08:10:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:420},{\u201cdateTime\u201d:\u201d2020-10-10T01:53:30.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:1230},{\u201cdateTime\u201d:\u201d2020-10-10T02:14:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:360},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:3,\u201dminutes\u201d:92,\u201dthirtyDayAvgMinutes\u201d:0},\u201dlight\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:193,\u201dthirtyDayAvgMinutes\u201d:0},\u201drem\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:33,\u201dthirtyDayAvgMinutes\u201d:0},\u201dwake\u201d:{\u201ccount\u201d:28,\u201dminutes\u201d:65,\u201dthirtyDayAvgMinutes\u201d:0}}},\u201dlogId\u201d:26311786557,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:318,\u201dminutesAwake\u201d:65,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-10T01:46:30.000\u201d,\u201dtimeInBed\u201d:383,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:92,\u201dlight\u201d:193,\u201drem\u201d:33,\u201dwake\u201d:65},\u201dtotalMinutesAsleep\u201d:373,\u201dtotalSleepRecords\u201d:2,\u201dtotalTimeInBed\u201d:443}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-11\u201d,\u201dduration\u201d:41640000,\u201defficiency\u201d:89,\u201dendTime\u201d:\u201d2020-10-11T11:47:00.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:450},{\u201cdateTime\u201d:\u201d2020-10-11T00:20:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:870},{\u201cdateTime\u201d:\u201d2020-10-11T00:34:30.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:780},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:52,\u201dthirtyDayAvgMinutes\u201d:62},\u201dlight\u201d:{\u201ccount\u201d:32,\u201dminutes\u201d:442,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:6,\u201dminutes\u201d:68,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:29,\u201dminutes\u201d:132,\u201dthirtyDayAvgMinutes\u201d:94}}},\u201dlogId\u201d:26589710670,\u201dminutesAfterWakeup\u201d:1,\u201dminutesAsleep\u201d:562,\u201dminutesAwake\u201d:132,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-11T00:12:30.000\u201d,\u201dtimeInBed\u201d:694,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:52,\u201dlight\u201d:442,\u201drem\u201d:68,\u201dwake\u201d:132},\u201dtotalMinutesAsleep\u201d:562,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:694}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 {\u201csleep\u201d:[{\u201cdateOfSleep\u201d:\u201d2020-10-12\u201d,\u201dduration\u201d:28980000,\u201defficiency\u201d:93,\u201dendTime\u201d:\u201d2020-10-12T09:34:30.000\u201d,\u201dinfoCode\u201d:0,\u201disMainSleep\u201d:true,\u201dlevels\u201d:{\u201cdata\u201d:[{\u201cdateTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dlevel\u201d:\u201dwake\u201d,\u201dseconds\u201d:600},{\u201cdateTime\u201d:\u201d2020-10-12T01:41:00.000\u201d,\u201dlevel\u201d:\u201dlight\u201d,\u201dseconds\u201d:60},{\u201cdateTime\u201d:\u201d2020-10-12T01:42:00.000\u201d,\u201dlevel\u201d:\u201ddeep\u201d,\u201dseconds\u201d:2340},\u2026], 
\u201csummary\u201d:{\u201cdeep\u201d:{\u201ccount\u201d:4,\u201dminutes\u201d:63,\u201dthirtyDayAvgMinutes\u201d:59},\u201dlight\u201d:{\u201ccount\u201d:27,\u201dminutes\u201d:257,\u201dthirtyDayAvgMinutes\u201d:364},\u201drem\u201d:{\u201ccount\u201d:5,\u201dminutes\u201d:94,\u201dthirtyDayAvgMinutes\u201d:58},\u201dwake\u201d:{\u201ccount\u201d:24,\u201dminutes\u201d:69,\u201dthirtyDayAvgMinutes\u201d:95}}},\u201dlogId\u201d:26589710673,\u201dminutesAfterWakeup\u201d:0,\u201dminutesAsleep\u201d:415,\u201dminutesAwake\u201d:68,\u201dminutesToFallAsleep\u201d:0,\u201dstartTime\u201d:\u201d2020-10-12T01:31:00.000\u201d,\u201dtimeInBed\u201d:483,\u201dtype\u201d:\u201dstages\u201d}],\u201dsummary\u201d:{\u201cstages\u201d:{\u201cdeep\u201d:63,\u201dlight\u201d:257,\u201drem\u201d:94,\u201dwake\u201d:69},\u201dtotalMinutesAsleep\u201d:415,\u201dtotalSleepRecords\u201d:1,\u201dtotalTimeInBed\u201d:483}} FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_summary_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API. 
See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME FLAG_TO_MUTATE STEPS FLAG_TO_MUTATE MUTATION COLUMN_MAPPINGS Script column Stream column JSON_FITBIT_COLUMN fitbit_data SCRIPTS - src/data/streams/mutations/fitbit/parse_steps_intraday_json.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note TIMESTAMP , LOCAL_DATE_TIME , and STEPS are parsed from 
JSON_FITBIT_COLUMN . JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit\u2019s API . See an example of the raw data RAPIDS expects for this data stream: Example of the expected raw data device_id fitbit_data a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-07\u201d,\u201dvalue\u201d:\u201d1775\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:5},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:3},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-08\u201d,\u201dvalue\u201d:\u201d3201\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:14},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:11},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:10},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}} a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u201cactivities-steps\u201d:[{\u201cdateTime\u201d:\u201d2020-10-09\u201d,\u201dvalue\u201d:\u201d998\u201d}],\u201dactivities-steps-intraday\u201d:{\u201cdataset\u201d:[{\u201ctime\u201d:\u201d00:00:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:01:00\u201d,\u201dvalue\u201d:0},{\u201ctime\u201d:\u201d00:02:00\u201d,\u201dvalue\u201d:0},\u2026],\u201ddatasetInterval\u201d:1,\u201ddatasetType\u201d:\u201dminute\u201d}}","title":"Jsonfitbit format"},{"location":"snippets/parsedfitbit_format/","text":"The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors . 
This file is at: src/data/streams/fitbitparsed_mysql/format.yaml If you want to use this stream with your data, modify every sensor in format.yaml to map all columns except TIMESTAMP in [RAPIDS_COLUMN_MAPPINGS] to your raw data column names. All columns are mandatory; however, all except device_id and local_date_time can be empty if you don\u2019t have that data. Just have in mind that some features will be empty if some of these columns are empty. FITBIT_HEARTRATE_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE_DAILY_RESTINGHR heartrate_daily_restinghr HEARTRATE_DAILY_CALORIESOUTOFRANGE heartrate_daily_caloriesoutofrange HEARTRATE_DAILY_CALORIESFATBURN heartrate_daily_caloriesfatburn HEARTRATE_DAILY_CALORIESCARDIO heartrate_daily_caloriescardio HEARTRATE_DAILY_CALORIESPEAK heartrate_daily_caloriespeak MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate_daily_restinghr heartrate_daily_caloriesoutofrange heartrate_daily_caloriesfatburn heartrate_daily_caloriescardio heartrate_daily_caloriespeak a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 72 1200.6102 760.3020 15.2048 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 70 1100.1120 660.0012 23.7088 0 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 69 750.3615 734.1516 131.8579 0 FITBIT_HEARTRATE_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id HEARTRATE heartrate HEARTRATE_ZONE heartrate_zone MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the raw data RAPIDS expects for this data stream device_id local_date_time heartrate heartrate_zone a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 68 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 67 outofrange a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 67 outofrange FITBIT_SLEEP_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME FLAG_TO_MUTATE LOCAL_START_DATE_TIME local_start_date_time LOCAL_END_DATE_TIME local_end_date_time DEVICE_ID device_id EFFICIENCY efficiency MINUTES_AFTER_WAKEUP minutes_after_wakeup MINUTES_ASLEEP minutes_asleep MINUTES_AWAKE minutes_awake MINUTES_TO_FALL_ASLEEP minutes_to_fall_asleep MINUTES_IN_BED minutes_in_bed IS_MAIN_SLEEP is_main_sleep TYPE type MUTATION COLUMN_MAPPINGS (None) SCRIPTS - src/data/streams/mutations/fitbit/add_local_date_time.py - src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1\u2019s count_awake , duration_awake , and count_awakenings , count_restless , duration_restless columns. 
Example of the expected raw data device_id local_start_date_time local_end_date_time efficiency minutes_after_wakeup minutes_asleep minutes_awake minutes_to_fall_asleep minutes_in_bed is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 15:36:30 2020-10-10 16:37:00 92 0 55 5 0 60 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-10 01:46:30 2020-10-10 08:10:00 88 0 318 65 0 383 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-11 00:12:30 2020-10-11 11:47:00 89 1 562 132 0 694 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-12 01:31:00 2020-10-12 09:34:30 93 0 415 68 0 483 1 stages FITBIT_SLEEP_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE LOCAL_DATE_TIME local_date_time DEVICE_ID device_id TYPE_EPISODE_ID type_episode_id DURATION duration IS_MAIN_SLEEP is_main_sleep TYPE type LEVEL level MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Fitbit API has two versions for sleep data, v1 and v1.2, we support both. 
Example of the expected raw data device_id type_episode_id local_date_time duration level is_main_sleep type a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:36:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:37:30 660 asleep 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 0 2020-10-10 15:48:30 60 restless 0 classic a748ee1a-1d0b-4ae9-9074-279a2b6ba524 \u2026 \u2026 \u2026 \u2026 \u2026 \u2026 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:46:30 420 light 1 stages a748ee1a-1d0b-4ae9-9074-279a2b6ba524 1 2020-10-10 01:53:30 1230 deep 1 stages FITBIT_STEPS_SUMMARY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 1775 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-08 3201 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-09 998 FITBIT_STEPS_INTRADAY RAPIDS_COLUMN_MAPPINGS RAPIDS column Stream column TIMESTAMP FLAG_TO_MUTATE DEVICE_ID device_id LOCAL_DATE_TIME local_date_time STEPS steps MUTATION COLUMN_MAPPINGS (None) SCRIPTS src/data/streams/mutations/fitbit/add_zero_timestamp.py Note add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones. 
Example of the expected raw data device_id local_date_time steps a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:00:00 5 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:01:00 3 a748ee1a-1d0b-4ae9-9074-279a2b6ba524 2020-10-07 00:02:00 0","title":"Parsedfitbit format"},{"location":"visualizations/data-quality-visualizations/","text":"Data Quality Visualizations \u00b6 We showcase these visualizations with a test study that collected 14 days of smartphone and Fitbit data from two participants (example01 and example02) and extracted behavioral features within five time segments (daily, morning, afternoon, evening, and night). Note Time segments (e.g. daily , morning , etc.) can have multiple instances (day 1, day 2, or morning 1, morning 2, etc.) 1. Histograms of phone data yield \u00b6 RAPIDS provides two histograms that show the number of time segment instances that had a certain ratio of valid yielded minutes and hours , respectively. A valid yielded minute has at least 1 row of data from any smartphone sensor and a valid yielded hour contains at least M valid minutes. These plots can be used as a rough indication of the smartphone monitoring coverage during a study aggregated across all participants. For example, the figure below shows a valid yielded minutes histogram for daily segments and we can infer that the monitoring coverage was very good since almost all segments contain at least 90 to 100% of the expected sensed minutes. Example Click here to see an example of these interactive visualizations in HTML format Histogram of the data yielded minute ratio for a single participant during five time segments (daily, morning, afternoon, evening, and night) 2. Heatmaps of overall data yield \u00b6 These heatmaps are a break down per time segment and per participant of Visualization 1 . 
Heatmap\u2019s rows represent participants, columns represent time segment instances and the cells\u2019 color represent the valid yielded minute or hour ratio for a participant during a time segment instance. As different participants might join a study on different dates and time segments can be of any length and start on any day, the x-axis can be labelled with the absolute time of the start of each time segment instance or the time delta between the start of each time segment instance minus the start of the first instance. These plots provide a quick study overview of the monitoring coverage per person and per time segment. The figure below shows the heatmap of the valid yielded minute ratio for participants example01 and example02 on daily segments and, as we inferred from the previous histogram, the lighter (yellow) color on most time segment instances (cells) indicate both phones sensed data without interruptions for most days (except for the first and last ones). [ABSOLUTE_TIME] Example Click here to see an example of these interactive visualizations in HTML format Overall compliance heatmap for all participants [RELATIVE_TIME] Example Click here to see an example of these interactive visualizations in HTML format Overall compliance heatmap for all participants 3. Heatmap of recorded phone sensors \u00b6 In these heatmaps rows represent time segment instances, columns represent minutes since the start of a time segment instance, and cells\u2019 color shows the number of phone sensors that logged at least one row of data during those 1-minute windows. RAPIDS creates a plot per participant and per time segment and can be used as a rough indication of whether time-based sensors were following their sensing schedule (e.g. if location was being sensed every 2 minutes). The figure below shows this heatmap for phone sensors collected by participant example01 in daily time segments from Apr 23 rd 2020 to May 4 th 2020. 
We can infer that for most of the monitoring time, the participant\u2019s phone logged data from at least 7 sensors each minute. Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the recorded phone sensors per minute and per time segment of a single participant 4. Heatmap of sensor row count \u00b6 These heatmaps are a per-sensor breakdown of Visualization 1 and Visualization 2 . Note that the second row (ratio of valid yielded minutes) of this heatmap matches the respective participant (bottom) row the screenshot in Visualization 2. In these heatmaps rows represent phone or Fitbit sensors, columns represent time segment instances and cell\u2019s color shows the normalized (0 to 1) row count of each sensor within a time segment instance. RAPIDS creates one heatmap per participant and they can be used to judge missing data on a per participant and per sensor basis. The figure below shows data for 14 phone sensors (including data yield) of example01\u2019s daily segments. From the top two rows, we can see that the phone was sensing data for most of the monitoring period (as suggested by Figure 3 and Figure 4). We can also infer how phone usage influenced the different sensor streams; there are peaks of screen events during the first day (Apr 23 rd ), peaks of location coordinates on Apr 26 th and Apr 30 th , and no sent or received SMS except for Apr 23 rd , Apr 29 th and Apr 30 th (unlabeled row between screen and locations). 
Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the sensor row count per time segment of a single participant","title":"Data Quality"},{"location":"visualizations/data-quality-visualizations/#data-quality-visualizations","text":"We showcase these visualizations with a test study that collected 14 days of smartphone and Fitbit data from two participants (example01 and example02) and extracted behavioral features within five time segments (daily, morning, afternoon, evening, and night). Note Time segments (e.g. daily , morning , etc.) can have multiple instances (day 1, day 2, or morning 1, morning 2, etc.)","title":"Data Quality Visualizations"},{"location":"visualizations/data-quality-visualizations/#1-histograms-of-phone-data-yield","text":"RAPIDS provides two histograms that show the number of time segment instances that had a certain ratio of valid yielded minutes and hours , respectively. A valid yielded minute has at least 1 row of data from any smartphone sensor and a valid yielded hour contains at least M valid minutes. These plots can be used as a rough indication of the smartphone monitoring coverage during a study aggregated across all participants. For example, the figure below shows a valid yielded minutes histogram for daily segments and we can infer that the monitoring coverage was very good since almost all segments contain at least 90 to 100% of the expected sensed minutes. Example Click here to see an example of these interactive visualizations in HTML format Histogram of the data yielded minute ratio for a single participant during five time segments (daily, morning, afternoon, evening, and night)","title":"1. Histograms of phone data yield"},{"location":"visualizations/data-quality-visualizations/#2-heatmaps-of-overall-data-yield","text":"These heatmaps are a break down per time segment and per participant of Visualization 1 . 
Heatmap\u2019s rows represent participants, columns represent time segment instances and the cells\u2019 color represent the valid yielded minute or hour ratio for a participant during a time segment instance. As different participants might join a study on different dates and time segments can be of any length and start on any day, the x-axis can be labelled with the absolute time of the start of each time segment instance or the time delta between the start of each time segment instance minus the start of the first instance. These plots provide a quick study overview of the monitoring coverage per person and per time segment. The figure below shows the heatmap of the valid yielded minute ratio for participants example01 and example02 on daily segments and, as we inferred from the previous histogram, the lighter (yellow) color on most time segment instances (cells) indicate both phones sensed data without interruptions for most days (except for the first and last ones). [ABSOLUTE_TIME] Example Click here to see an example of these interactive visualizations in HTML format Overall compliance heatmap for all participants [RELATIVE_TIME] Example Click here to see an example of these interactive visualizations in HTML format Overall compliance heatmap for all participants","title":"2. Heatmaps of overall data yield"},{"location":"visualizations/data-quality-visualizations/#3-heatmap-of-recorded-phone-sensors","text":"In these heatmaps rows represent time segment instances, columns represent minutes since the start of a time segment instance, and cells\u2019 color shows the number of phone sensors that logged at least one row of data during those 1-minute windows. RAPIDS creates a plot per participant and per time segment and can be used as a rough indication of whether time-based sensors were following their sensing schedule (e.g. if location was being sensed every 2 minutes). 
The figure below shows this heatmap for phone sensors collected by participant example01 in daily time segments from Apr 23 rd 2020 to May 4 th 2020. We can infer that for most of the monitoring time, the participant\u2019s phone logged data from at least 7 sensors each minute. Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the recorded phone sensors per minute and per time segment of a single participant","title":"3. Heatmap of recorded phone sensors"},{"location":"visualizations/data-quality-visualizations/#4-heatmap-of-sensor-row-count","text":"These heatmaps are a per-sensor breakdown of Visualization 1 and Visualization 2 . Note that the second row (ratio of valid yielded minutes) of this heatmap matches the respective participant (bottom) row of the screenshot in Visualization 2. In these heatmaps rows represent phone or Fitbit sensors, columns represent time segment instances and cell\u2019s color shows the normalized (0 to 1) row count of each sensor within a time segment instance. RAPIDS creates one heatmap per participant and they can be used to judge missing data on a per participant and per sensor basis. The figure below shows data for 14 phone sensors (including data yield) of example01\u2019s daily segments. From the top two rows, we can see that the phone was sensing data for most of the monitoring period (as suggested by Figure 3 and Figure 4). We can also infer how phone usage influenced the different sensor streams; there are peaks of screen events during the first day (Apr 23 rd ), peaks of location coordinates on Apr 26 th and Apr 30 th , and no sent or received SMS except for Apr 23 rd , Apr 29 th and Apr 30 th (unlabeled row between screen and locations). Example Click here to see an example of these interactive visualizations in HTML format Heatmap of the sensor row count per time segment of a single participant","title":"4. 
Heatmap of sensor row count"},{"location":"visualizations/feature-visualizations/","text":"Feature Visualizations \u00b6 1. Heatmap Correlation Matrix \u00b6 Columns and rows are the behavioral features computed in RAPIDS, cells\u2019 color represents the correlation coefficient between all days of data for every pair of features of all participants. The user can specify a minimum number of observations ( time segment instances) required to compute the correlation between two features using the MIN_ROWS_RATIO parameter (0.5 by default) and the correlation method (Pearson, Spearman or Kendall) with the CORR_METHOD parameter. In addition, this plot can be configured to only display correlation coefficients above a threshold using the CORR_THRESHOLD parameter (0.1 by default). Example Click here to see an example of these interactive visualizations in HTML format Correlation matrix heatmap for all the features of all participants","title":"Features"},{"location":"visualizations/feature-visualizations/#feature-visualizations","text":"","title":"Feature Visualizations"},{"location":"visualizations/feature-visualizations/#1-heatmap-correlation-matrix","text":"Columns and rows are the behavioral features computed in RAPIDS, cells\u2019 color represents the correlation coefficient between all days of data for every pair of features of all participants. The user can specify a minimum number of observations ( time segment instances) required to compute the correlation between two features using the MIN_ROWS_RATIO parameter (0.5 by default) and the correlation method (Pearson, Spearman or Kendall) with the CORR_METHOD parameter. In addition, this plot can be configured to only display correlation coefficients above a threshold using the CORR_THRESHOLD parameter (0.1 by default). Example Click here to see an example of these interactive visualizations in HTML format Correlation matrix heatmap for all the features of all participants","title":"1. 
Heatmap Correlation Matrix"},{"location":"workflow-examples/analysis/","text":"Analysis Workflow Example \u00b6 TL;DR In addition to using RAPIDS to extract behavioral features and create plots, you can structure your data analysis within RAPIDS (i.e. cleaning your features and creating ML/statistical models) We include an analysis example in RAPIDS that covers raw data processing, cleaning, feature extraction, machine learning modeling, and evaluation Use this example as a guide to structure your own analysis within RAPIDS RAPIDS analysis workflows are compatible with your favorite data science tools and libraries RAPIDS analysis workflows are reproducible and we encourage you to publish them along with your research papers Why should I integrate my analysis in RAPIDS? \u00b6 Even though the bulk of RAPIDS current functionality is related to the computation of behavioral features, we recommend RAPIDS as a complementary tool to create a mobile data analysis workflow. This is because the cookiecutter data science file organization guidelines, the use of Snakemake, the provided behavioral features, and the reproducible R and Python development environments allow researchers to divide an analysis workflow into small parts that can be audited, shared in an online repository, reproduced in other computers, and understood by other people as they follow a familiar and consistent structure. We believe these advantages outweigh the time needed to learn how to create these workflows in RAPIDS. We clarify that to create analysis workflows in RAPIDS, researchers can still use any data manipulation tools, editors, libraries or languages they are already familiar with. RAPIDS is meant to be the final destination of analysis code that was developed in interactive notebooks or stand-alone scripts. 
For example, a user can compute call and location features using RAPIDS, then, they can use Jupyter notebooks to explore feature cleaning approaches and once the cleaning code is final, it can be moved to RAPIDS as a new step in the pipeline. In turn, the output of this cleaning step can be used to explore machine learning models and once a model is finished, it can also be transferred to RAPIDS as a step of its own. The idea is that when it is time to publish a piece of research, a RAPIDS workflow can be shared in a public repository as is. In the following sections we share an example of how we structured an analysis workflow in RAPIDS. Analysis workflow structure \u00b6 To accurately reflect the complexity of a real-world modeling scenario, we decided not to oversimplify this example. Importantly, every step in this example follows a basic structure: an input file and parameters are manipulated by an R or Python script that saves the results to an output file. Input files, parameters, output files and scripts are grouped into Snakemake rules that are described on smk files in the rules folder (we point the reader to the relevant rule(s) of each step). Researchers can use these rules and scripts as a guide to create their own as it is expected every modeling project will have different requirements, data and goals but ultimately most follow a similar chained pattern. Hint The example\u2019s config file is example_profile/example_config.yaml and its Snakefile is in example_profile/Snakefile . The config file is already configured to process the sensor data as explained in Analysis workflow modules . Description of the study modeled in our analysis workflow example \u00b6 Our example is based on a hypothetical study that recruited 2 participants that underwent surgery and collected mobile data for at least one week before and one week after the procedure. 
Participants wore a Fitbit device and installed the AWARE client in their personal Android and iOS smartphones to collect mobile data 24/7. In addition, participants completed daily severity ratings of 12 common symptoms on a scale from 0 to 10 that we summed up into a daily symptom burden score. The goal of this workflow is to find out if we can predict the daily symptom burden score of a participant. Thus, we framed this question as a binary classification problem with two classes, high and low symptom burden based on the scores above and below average of each participant. We also want to compare the performance of individual (personalized) models vs a population model. In total, our example workflow has nine steps that are in charge of sensor data preprocessing, feature extraction, feature cleaning, machine learning model training and model evaluation (see figure below). We ship this workflow with RAPIDS and share files with test data in an Open Science Framework repository. Modules of RAPIDS example workflow, from raw data to model evaluation Configure and run the analysis workflow example \u00b6 Install RAPIDS Unzip the CSV files inside rapids_example_csv.zip in data/external/example_workflow/*.csv . Create the participant files for this example by running: ./rapids -j1 create_example_participant_files Run the example pipeline with: ./rapids -j1 --profile example_profile Note you will see a lot of warning messages, you can ignore them since they happen because we ran ML algorithms with a small fake dataset. Modules of our analysis workflow example \u00b6 1. 
Feature extraction We extract daily behavioral features for data yield, received and sent messages, missed, incoming and outgoing calls, resample fused location data using Doryab provider, activity recognition, battery, Bluetooth, screen, light, applications foreground, conversations, Wi-Fi connected, Wi-Fi visible, Fitbit heart rate summary and intraday data, Fitbit sleep summary data, and Fitbit step summary and intraday data without excluding sleep periods with an active bout threshold of 10 steps. In total, we obtained 237 daily sensor features over 12 days per participant. 2. Extract demographic data. It is common to have demographic data in addition to mobile and target (ground truth) data. In this example we include participants\u2019 age, gender and the number of days they spent in hospital after their surgery as features in our model. We extract these three columns from the data/external/example_workflow/participant_info.csv file. As these three features remain the same within participants, they are used only on the population model. Refer to the demographic_features rule in rules/models.smk . 3. Create target labels. The two classes for our machine learning binary classification problem are high and low symptom burden. Target values are already stored in the data/external/example_workflow/participant_target.csv file. A new rule/script can be created if further manipulation is necessary. Refer to the parse_targets rule in rules/models.smk . 4. Feature merging. These daily features are stored on a CSV file per sensor, a CSV file per participant, and a CSV file including all features from all participants (in every case each column represents a feature and each row represents a day). Refer to the merge_sensor_features_for_individual_participants and merge_sensor_features_for_all_participants rules in rules/features.smk . 5. Data visualization. 
At this point the user can use the five plots RAPIDS provides (or implement new ones) to explore and understand the quality of the raw data and extracted features and decide what sensors, days, or participants to include and exclude. Refer to rules/reports.smk to find the rules that generate these plots. 6. Feature cleaning. In this stage we perform four steps to clean our sensor feature file. First, we discard days with a data yield hour ratio less than or equal to 0.75, i.e. we include days with at least 18 hours of data. Second, we drop columns (features) with more than 30% of missing rows. Third, we drop columns with zero variance. Fourth, we drop rows (days) with more than 30% of missing columns (features). In this cleaning stage several parameters are created and exposed in example_profile/example_config.yaml . After this step, we kept 158 features over 11 days for the individual model of p01, 101 features over 12 days for the individual model of p02 and 106 features over 20 days for the population model. Note that the difference in the number of features between p01 and p02 is mostly due to iOS restrictions that stop researchers from collecting the same number of sensors as in Android phones. Feature cleaning for the individual models is done in the clean_sensor_features_for_individual_participants rule and for the population model in the clean_sensor_features_for_all_participants rule in rules/models.smk . 7. Merge features and targets. In this step we merge the cleaned features and target labels for our individual models in the merge_features_and_targets_for_individual_model rule in rules/models.smk . Additionally, we merge the cleaned features, target labels, and demographic features of our two participants for the population model in the merge_features_and_targets_for_population_model rule in rules/models.smk . These two merged files are the input for our individual and population models. 8. Modelling. 
This stage has three phases: model building, training and evaluation. In the building phase we impute, normalize and oversample our dataset. Missing numeric values in each column are imputed with their mean and we impute missing categorical values with their mode. We normalize each numeric column with one of three strategies (min-max, z-score, and scikit-learn package\u2019s robust scaler) and we one-hot encode each categorial feature as a numerical array. We oversample our imbalanced dataset using SMOTE (Synthetic Minority Over-sampling Technique) or a Random Over sampler from scikit-learn. All these parameters are exposed in example_profile/example_config.yaml . In the training phase, we create eight models: logistic regression, k-nearest neighbors, support vector machine, decision tree, random forest, gradient boosting classifier, extreme gradient boosting classifier and a light gradient boosting machine. We cross-validate each model with an inner cycle to tune hyper-parameters based on the Macro F1 score and an outer cycle to predict the test set on a model with the best hyper-parameters. Both cross-validation cycles use a leave-one-out strategy. Parameters for each model like weights and learning rates are exposed in example_profile/example_config.yaml . Finally, in the evaluation phase we compute the accuracy, Macro F1, kappa, area under the curve and per class precision, recall and F1 score of all folds of the outer cross-validation cycle. Refer to the modelling_for_individual_participants rule for the individual modeling and to the modelling_for_all_participants rule for the population modeling, both in rules/models.smk . 9. Compute model baselines. We create three baselines to evaluate our classification models. First, a majority classifier that labels each test sample with the majority class of our training data. 
Second, a random weighted classifier that predicts each test observation sampling at random from a binomial distribution based on the ratio of our target labels. Third, a decision tree classifier based solely on the demographic features of each participant. As we do not have demographic features for individual model, this baseline is only available for population model. Our baseline metrics (e.g. accuracy, precision, etc.) are saved into a CSV file, ready to be compared to our modeling results. Refer to the baselines_for_individual_model rule for the individual model baselines and to the baselines_for_population_model rule for population model baselines, both in rules/models.smk .","title":"Complete Example"},{"location":"workflow-examples/analysis/#analysis-workflow-example","text":"TL;DR In addition to using RAPIDS to extract behavioral features and create plots, you can structure your data analysis within RAPIDS (i.e. cleaning your features and creating ML/statistical models) We include an analysis example in RAPIDS that covers raw data processing, cleaning, feature extraction, machine learning modeling, and evaluation Use this example as a guide to structure your own analysis within RAPIDS RAPIDS analysis workflows are compatible with your favorite data science tools and libraries RAPIDS analysis workflows are reproducible and we encourage you to publish them along with your research papers","title":"Analysis Workflow Example"},{"location":"workflow-examples/analysis/#why-should-i-integrate-my-analysis-in-rapids","text":"Even though the bulk of RAPIDS current functionality is related to the computation of behavioral features, we recommend RAPIDS as a complementary tool to create a mobile data analysis workflow. 
This is because the cookiecutter data science file organization guidelines, the use of Snakemake, the provided behavioral features, and the reproducible R and Python development environments allow researchers to divide an analysis workflow into small parts that can be audited, shared in an online repository, reproduced in other computers, and understood by other people as they follow a familiar and consistent structure. We believe these advantages outweigh the time needed to learn how to create these workflows in RAPIDS. We clarify that to create analysis workflows in RAPIDS, researchers can still use any data manipulation tools, editors, libraries or languages they are already familiar with. RAPIDS is meant to be the final destination of analysis code that was developed in interactive notebooks or stand-alone scripts. For example, a user can compute call and location features using RAPIDS, then, they can use Jupyter notebooks to explore feature cleaning approaches and once the cleaning code is final, it can be moved to RAPIDS as a new step in the pipeline. In turn, the output of this cleaning step can be used to explore machine learning models and once a model is finished, it can also be transferred to RAPIDS as a step of its own. The idea is that when it is time to publish a piece of research, a RAPIDS workflow can be shared in a public repository as is. In the following sections we share an example of how we structured an analysis workflow in RAPIDS.","title":"Why should I integrate my analysis in RAPIDS?"},{"location":"workflow-examples/analysis/#analysis-workflow-structure","text":"To accurately reflect the complexity of a real-world modeling scenario, we decided not to oversimplify this example. Importantly, every step in this example follows a basic structure: an input file and parameters are manipulated by an R or Python script that saves the results to an output file. 
Input files, parameters, output files and scripts are grouped into Snakemake rules that are described on smk files in the rules folder (we point the reader to the relevant rule(s) of each step). Researchers can use these rules and scripts as a guide to create their own as it is expected every modeling project will have different requirements, data and goals but ultimately most follow a similar chained pattern. Hint The example\u2019s config file is example_profile/example_config.yaml and its Snakefile is in example_profile/Snakefile . The config file is already configured to process the sensor data as explained in Analysis workflow modules .","title":"Analysis workflow structure"},{"location":"workflow-examples/analysis/#description-of-the-study-modeled-in-our-analysis-workflow-example","text":"Our example is based on a hypothetical study that recruited 2 participants that underwent surgery and collected mobile data for at least one week before and one week after the procedure. Participants wore a Fitbit device and installed the AWARE client in their personal Android and iOS smartphones to collect mobile data 24/7. In addition, participants completed daily severity ratings of 12 common symptoms on a scale from 0 to 10 that we summed up into a daily symptom burden score. The goal of this workflow is to find out if we can predict the daily symptom burden score of a participant. Thus, we framed this question as a binary classification problem with two classes, high and low symptom burden based on the scores above and below average of each participant. We also want to compare the performance of individual (personalized) models vs a population model. In total, our example workflow has nine steps that are in charge of sensor data preprocessing, feature extraction, feature cleaning, machine learning model training and model evaluation (see figure below). We ship this workflow with RAPIDS and share files with test data in an Open Science Framework repository. 
Modules of RAPIDS example workflow, from raw data to model evaluation","title":"Description of the study modeled in our analysis workflow example"},{"location":"workflow-examples/analysis/#configure-and-run-the-analysis-workflow-example","text":"Install RAPIDS Unzip the CSV files inside rapids_example_csv.zip in data/external/example_workflow/*.csv . Create the participant files for this example by running: ./rapids -j1 create_example_participant_files Run the example pipeline with: ./rapids -j1 --profile example_profile Note you will see a lot of warning messages, you can ignore them since they happen because we ran ML algorithms with a small fake dataset.","title":"Configure and run the analysis workflow example"},{"location":"workflow-examples/analysis/#modules-of-our-analysis-workflow-example","text":"1. Feature extraction We extract daily behavioral features for data yield, received and sent messages, missed, incoming and outgoing calls, resample fused location data using Doryab provider, activity recognition, battery, Bluetooth, screen, light, applications foreground, conversations, Wi-Fi connected, Wi-Fi visible, Fitbit heart rate summary and intraday data, Fitbit sleep summary data, and Fitbit step summary and intraday data without excluding sleep periods with an active bout threshold of 10 steps. In total, we obtained 237 daily sensor features over 12 days per participant. 2. Extract demographic data. It is common to have demographic data in addition to mobile and target (ground truth) data. In this example we include participants\u2019 age, gender and the number of days they spent in hospital after their surgery as features in our model. We extract these three columns from the data/external/example_workflow/participant_info.csv file. As these three features remain the same within participants, they are used only on the population model. Refer to the demographic_features rule in rules/models.smk . 3. Create target labels. 
The two classes for our machine learning binary classification problem are high and low symptom burden. Target values are already stored in the data/external/example_workflow/participant_target.csv file. A new rule/script can be created if further manipulation is necessary. Refer to the parse_targets rule in rules/models.smk . 4. Feature merging. These daily features are stored on a CSV file per sensor, a CSV file per participant, and a CSV file including all features from all participants (in every case each column represents a feature and each row represents a day). Refer to the merge_sensor_features_for_individual_participants and merge_sensor_features_for_all_participants rules in rules/features.smk . 5. Data visualization. At this point the user can use the five plots RAPIDS provides (or implement new ones) to explore and understand the quality of the raw data and extracted features and decide what sensors, days, or participants to include and exclude. Refer to rules/reports.smk to find the rules that generate these plots. 6. Feature cleaning. In this stage we perform four steps to clean our sensor feature file. First, we discard days with a data yield hour ratio less than or equal to 0.75, i.e. we include days with at least 18 hours of data. Second, we drop columns (features) with more than 30% of missing rows. Third, we drop columns with zero variance. Fourth, we drop rows (days) with more than 30% of missing columns (features). In this cleaning stage several parameters are created and exposed in example_profile/example_config.yaml . After this step, we kept 158 features over 11 days for the individual model of p01, 101 features over 12 days for the individual model of p02 and 106 features over 20 days for the population model. Note that the difference in the number of features between p01 and p02 is mostly due to iOS restrictions that stop researchers from collecting the same number of sensors as in Android phones. 
Feature cleaning for the individual models is done in the clean_sensor_features_for_individual_participants rule and for the population model in the clean_sensor_features_for_all_participants rule in rules/models.smk . 7. Merge features and targets. In this step we merge the cleaned features and target labels for our individual models in the merge_features_and_targets_for_individual_model rule in rules/models.smk . Additionally, we merge the cleaned features, target labels, and demographic features of our two participants for the population model in the merge_features_and_targets_for_population_model rule in rules/models.smk . These two merged files are the input for our individual and population models. 8. Modelling. This stage has three phases: model building, training and evaluation. In the building phase we impute, normalize and oversample our dataset. Missing numeric values in each column are imputed with their mean and we impute missing categorical values with their mode. We normalize each numeric column with one of three strategies (min-max, z-score, and scikit-learn package\u2019s robust scaler) and we one-hot encode each categorial feature as a numerical array. We oversample our imbalanced dataset using SMOTE (Synthetic Minority Over-sampling Technique) or a Random Over sampler from scikit-learn. All these parameters are exposed in example_profile/example_config.yaml . In the training phase, we create eight models: logistic regression, k-nearest neighbors, support vector machine, decision tree, random forest, gradient boosting classifier, extreme gradient boosting classifier and a light gradient boosting machine. We cross-validate each model with an inner cycle to tune hyper-parameters based on the Macro F1 score and an outer cycle to predict the test set on a model with the best hyper-parameters. Both cross-validation cycles use a leave-one-out strategy. 
Parameters for each model like weights and learning rates are exposed in example_profile/example_config.yaml . Finally, in the evaluation phase we compute the accuracy, Macro F1, kappa, area under the curve and per class precision, recall and F1 score of all folds of the outer cross-validation cycle. Refer to the modelling_for_individual_participants rule for the individual modeling and to the modelling_for_all_participants rule for the population modeling, both in rules/models.smk . 9. Compute model baselines. We create three baselines to evaluate our classification models. First, a majority classifier that labels each test sample with the majority class of our training data. Second, a random weighted classifier that predicts each test observation sampling at random from a binomial distribution based on the ratio of our target labels. Third, a decision tree classifier based solely on the demographic features of each participant. As we do not have demographic features for individual model, this baseline is only available for population model. Our baseline metrics (e.g. accuracy, precision, etc.) are saved into a CSV file, ready to be compared to our modeling results. Refer to the baselines_for_individual_model rule for the individual model baselines and to the baselines_for_population_model rule for population model baselines, both in rules/models.smk .","title":"Modules of our analysis workflow example"},{"location":"workflow-examples/minimal/","text":"Minimal Working Example \u00b6 This is a quick guide for creating and running a simple pipeline to extract missing, outgoing, and incoming call features for 24 hr ( 00:00:00 to 23:59:59 ) and night ( 00:00:00 to 05:59:59 ) time segments of every day of data of one participant that was monitored on the US East coast with an Android smartphone. 
Install RAPIDS and make sure your conda environment is active (see Installation ) Download this CSV file and save it as data/external/aware_csv/calls.csv Make the changes listed below for the corresponding Configuration step (we provide an example of what the relevant sections in your config.yml will look like after you are done) Required configuration changes ( click to expand ) Supported data streams . Based on the docs, we decided to use the aware_csv data stream because we are processing aware data saved in a CSV file. We will use this label in a later step; there\u2019s no need to type it or save it anywhere yet. Create your participants file . Since we are processing data from a single participant, you only need to create a single participant file called p01.yaml in data/external/participant_files . This participant file only has a PHONE section because this hypothetical participant was only monitored with a smartphone. Note that for a real analysis, you can do this automatically with a CSV file Add p01 to [PIDS] in config.yaml Create a file in data/external/participant_files/p01.yaml with the following content: PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 ] # the participant's AWARE device id PLATFORMS : [ android ] # or ios LABEL : MyTestP01 # any string START_DATE : 2020-01-01 # this can also be empty END_DATE : 2021-01-01 # this can also be empty Select what time segments you want to extract features on. Set [TIME_SEGMENTS][FILE] to data/external/timesegments_periodic.csv Create a file in data/external/timesegments_periodic.csv with the following content label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 Choose the timezone of your study . 
We will use the default time zone settings since this example is processing data collected on the US East Coast ( America/New_York ) TIMEZONE : TYPE : SINGLE SINGLE : TZCODE : America/New_York Modify your device data stream configuration Set [PHONE_DATA_STREAMS][USE] to aware_csv . We will use the default value for [PHONE_DATA_STREAMS][aware_csv][FOLDER] since we already stored the test calls CSV file there. Select what sensors and features you want to process. Set [PHONE_CALLS][CONTAINER] to calls.csv in the config.yaml file. Set [PHONE_CALLS][PROVIDERS][RAPIDS][COMPUTE] to True in the config.yaml file. Example of the config.yaml sections after the changes outlined above This will be your config.yaml after following the instructions above. Click on the numbered markers to know more. PIDS : [ p01 ] # (1) TIMEZONE : TYPE : SINGLE # (2) SINGLE : TZCODE : America/New_York # ... other irrelevant sections TIME_SEGMENTS : &time_segments TYPE : PERIODIC # (3) FILE : \"data/external/timesegments_periodic.csv\" # (4) INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE PHONE_DATA_STREAMS : USE : aware_csv # (5) aware_csv : FOLDER : data/external/aware_csv # (6) # ... other irrelevant sections ############## PHONE ########################################################### ################################################################################ # ... other irrelevant sections # Communication call features config, TYPES and FEATURES keys need to match PHONE_CALLS : CONTAINER : calls.csv # (7) PROVIDERS : RAPIDS : COMPUTE : True # (8) CALL_TYPES : ... We added p01 to PIDS after creating the participant file: data/external/participant_files/p01.yaml With the following content: PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 ] # the participant's AWARE device id PLATFORMS : [ android ] # or ios LABEL : MyTestP01 # any string START_DATE : 2020-01-01 # this can also be empty END_DATE : 2021-01-01 # this can also be empty We use the default SINGLE time zone. 
We use the default PERIODIC time segment [TYPE] We created this time segments file with these lines: label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 We set [USE] to aware_csv to tell RAPIDS to process sensor data collected with the AWARE Framework stored in CSV files. We used the default [FOLDER] for aware_csv since we already stored our test calls.csv file there We changed [CONTAINER] to calls.csv to process our test call data. We flipped [COMPUTE] to True to extract call behavioral features using the RAPIDS feature provider. Run RAPIDS ./rapids -j1 The call features for daily and night time segments will be in data/processed/features/all_participants/all_sensor_features.csv","title":"Minimal Example"},{"location":"workflow-examples/minimal/#minimal-working-example","text":"This is a quick guide for creating and running a simple pipeline to extract missing, outgoing, and incoming call features for 24 hr ( 00:00:00 to 23:59:59 ) and night ( 00:00:00 to 05:59:59 ) time segments of every day of data of one participant that was monitored on the US East coast with an Android smartphone. Install RAPIDS and make sure your conda environment is active (see Installation ) Download this CSV file and save it as data/external/aware_csv/calls.csv Make the changes listed below for the corresponding Configuration step (we provide an example of what the relevant sections in your config.yaml will look like after you are done) Required configuration changes ( click to expand ) Supported data streams . Based on the docs, we decided to use the aware_csv data stream because we are processing aware data saved in a CSV file. We will use this label in a later step; there\u2019s no need to type it or save it anywhere yet. Create your participants file . Since we are processing data from a single participant, you only need to create a single participant file called p01.yaml in data/external/participant_files . 
This participant file only has a PHONE section because this hypothetical participant was only monitored with a smartphone. Note that for a real analysis, you can do this automatically with a CSV file Add p01 to [PIDS] in config.yaml Create a file in data/external/participant_files/p01.yaml with the following content: PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 ] # the participant's AWARE device id PLATFORMS : [ android ] # or ios LABEL : MyTestP01 # any string START_DATE : 2020-01-01 # this can also be empty END_DATE : 2021-01-01 # this can also be empty Select what time segments you want to extract features on. Set [TIME_SEGMENTS][FILE] to data/external/timesegments_periodic.csv Create a file in data/external/timesegments_periodic.csv with the following content label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 Choose the timezone of your study . We will use the default time zone settings since this example is processing data collected on the US East Coast ( America/New_York ) TIMEZONE : TYPE : SINGLE SINGLE : TZCODE : America/New_York Modify your device data stream configuration Set [PHONE_DATA_STREAMS][USE] to aware_csv . We will use the default value for [PHONE_DATA_STREAMS][aware_csv][FOLDER] since we already stored the test calls CSV file there. Select what sensors and features you want to process. Set [PHONE_CALLS][CONTAINER] to calls.csv in the config.yaml file. Set [PHONE_CALLS][PROVIDERS][RAPIDS][COMPUTE] to True in the config.yaml file. Example of the config.yaml sections after the changes outlined above This will be your config.yaml after following the instructions above. Click on the numbered markers to know more. PIDS : [ p01 ] # (1) TIMEZONE : TYPE : SINGLE # (2) SINGLE : TZCODE : America/New_York # ... 
other irrelevant sections TIME_SEGMENTS : &time_segments TYPE : PERIODIC # (3) FILE : \"data/external/timesegments_periodic.csv\" # (4) INCLUDE_PAST_PERIODIC_SEGMENTS : FALSE PHONE_DATA_STREAMS : USE : aware_csv # (5) aware_csv : FOLDER : data/external/aware_csv # (6) # ... other irrelevant sections ############## PHONE ########################################################### ################################################################################ # ... other irrelevant sections # Communication call features config, TYPES and FEATURES keys need to match PHONE_CALLS : CONTAINER : calls.csv # (7) PROVIDERS : RAPIDS : COMPUTE : True # (8) CALL_TYPES : ... We added p01 to PIDS after creating the participant file: data/external/participant_files/p01.yaml With the following content: PHONE : DEVICE_IDS : [ a748ee1a-1d0b-4ae9-9074-279a2b6ba524 ] # the participant's AWARE device id PLATFORMS : [ android ] # or ios LABEL : MyTestP01 # any string START_DATE : 2020-01-01 # this can also be empty END_DATE : 2021-01-01 # this can also be empty We use the default SINGLE time zone. We use the default PERIODIC time segment [TYPE] We created this time segments file with these lines: label,start_time,length,repeats_on,repeats_value daily,00:00:00,23H 59M 59S,every_day,0 night,00:00:00,5H 59M 59S,every_day,0 We set [USE] to aware_csv to tell RAPIDS to process sensor data collected with the AWARE Framework stored in CSV files. We used the default [FOLDER] for aware_csv since we already stored our test calls.csv file there We changed [CONTAINER] to calls.csv to process our test call data. We flipped [COMPUTE] to True to extract call behavioral features using the RAPIDS feature provider. 
Run RAPIDS ./rapids -j1 The call features for daily and morning time segments will be in data/processed/features/all_participants/all_sensor_features.csv","title":"Minimal Working Example"}]} \ No newline at end of file diff --git a/1.3/setup/configuration/index.html b/1.3/setup/configuration/index.html new file mode 100644 index 00000000..ac25b43d --- /dev/null +++ b/1.3/setup/configuration/index.html @@ -0,0 +1,3063 @@ + + + + + + + + + + + + + + + + + + + + + + Configuration - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + +
+
+ + + + + + + + +

Configuration

+

You need to follow these steps to configure your RAPIDS deployment before you can extract behavioral features.

+
    +
  1. Verify RAPIDS can process your data streams
  2. +
  3. Create your participants files
  4. +
  5. Select what time segments you want to extract features on
  6. +
  7. Select the timezone of your study
  8. +
  9. Configure your data streams
  10. +
  11. Select what sensors and features you want to process
  12. +
+

When you are done with this configuration, go to executing RAPIDS.

+
+

Hint

+

Every time you see config["KEY"] or [KEY] in these docs, we are referring to the corresponding key in the config.yaml file.

+
+
+

Supported data streams

+

A data stream refers to sensor data collected using a specific device with a specific format and stored in a specific container. For example, the aware_mysql data stream handles smartphone data (device) collected with the AWARE Framework (format) stored in a MySQL database (container).

+

Check the table in introduction to data streams to know what data streams we support. If your data stream is supported, continue to the next configuration section, you will use its label later in this guide (e.g. aware_mysql). If your steam is not supported, but you want to implement it, follow the tutorial to add support for new data streams and open a new discussion in Github with any questions.

+
+

Participant files

+

Participant files link together multiple devices (smartphones and wearables) to specific participants and identify them throughout RAPIDS. You can create these files manually or automatically. Participant files are stored in data/external/participant_files/pxx.yaml and follow a unified structure.

+
Remember to modify the config.yaml file with your PIDS

The list PIDS in config.yaml needs to have the participant file names of the people you want to process. For example, if you created p01.yaml, p02.yaml and p03.yaml files in /data/external/participant_files/, then PIDS should be: +

PIDS: [p01, p02, p03] 
+

+
+
Optional: Migrating participants files with the old format

If you were using the pre-release version of RAPIDS with participant files in plain text (as opposed to yaml), you could run the following command, and your old files will be converted into yaml files stored in data/external/participant_files/

+
python tools/update_format_participant_files.py
+
+
+

Structure of participants files

+
Example of the structure of a participant file

In this example, the participant used an android phone, an ios phone, a Fitbit device, and an Empatica device throughout the study between April 23rd, 2020, and October 28th, 2020

+

If your participants didn’t use a [PHONE], [FITBIT] or [EMPATICA] device, it is not necessary to include that section in their participant file. In other words, you can analyze data from 1 or more devices per participant.

+
PHONE:
+  DEVICE_IDS: [a748ee1a-1d0b-4ae9-9074-279a2b6ba524, dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43]
+  PLATFORMS: [android,ios]
+  LABEL: test01
+  START_DATE: 2020-04-23
+  END_DATE: 2020-10-28
+FITBIT:
+  DEVICE_IDS: [fitbit1]
+  LABEL: test01
+  START_DATE: 2020-04-23
+  END_DATE: 2020-10-28
+EMPATICA:
+  DEVICE_IDS: [empatica1]
+  LABEL: test01
+  START_DATE: 2020-04-23
+  END_DATE: 2020-10-28
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Key                     Description
[DEVICE_IDS]An array of the strings that uniquely identify each smartphone, you can have more than one for when participants changed phones in the middle of the study.
[PLATFORMS]An array that specifies the OS of each smartphone in [DEVICE_IDS] , use a combination of android or ios (we support participants that changed platforms in the middle of your study!). You can set [PLATFORMS]: [infer], and RAPIDS will infer them automatically (each phone data stream infer this differently, e.g., aware_mysql uses the aware_device table).
[LABEL]A string that is used in reports and visualizations.
[START_DATE]A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS. Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00.
[END_DATE]A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS. Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00.
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Key                     Description
[DEVICE_IDS]An array of the strings that uniquely identify each Fitbit, you can have more than one in case the participant changed devices in the middle of the study.
[LABEL]A string that is used in reports and visualizations.
[START_DATE]A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS. Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00.
[END_DATE]A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS. Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00.
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Key                     Description
[DEVICE_IDS]An array of the strings that uniquely identify each Empatica device used by this participant. Since the most common use case involves having multiple zip files from a single device for each person, set this device id to an arbitrary string (we usually use their pid)
[LABEL]A string that is used in reports and visualizations.
[START_DATE]A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS. Only data collected after this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00.
[END_DATE]A string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS. Only data collected before this date-time will be included in the analysis. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00.
+
+
+

Automatic creation of participant files

+

You can use a CSV file with a row per participant to automatically create participant files.

+
AWARE_DEVICE_TABLE was deprecated

In previous versions of RAPIDS, you could create participant files automatically using the aware_device table. We deprecated this option, but you can still achieve the same results if you export the output of the following SQL query as a CSV file and follow the instructions below:

+
SELECT device_id, device_id as fitbit_id, CONCAT("p", _id) as empatica_id, CONCAT("p", _id) as pid, if(brand = "iPhone", "ios", "android") as platform, CONCAT("p", _id)  as label, DATE_FORMAT(FROM_UNIXTIME((timestamp/1000)- 86400), "%Y-%m-%d") as start_date, CURRENT_DATE as end_date from aware_device order by _id;
+
+
+

In your config.yaml:

+
    +
  1. Set CSV_FILE_PATH to a CSV file path that complies with the specs described below
  2. +
  3. Set the devices (PHONE, FITBIT, EMPATICA) [ADD] flag to TRUE depending on what devices you used in your study.
  4. +
+
CREATE_PARTICIPANT_FILES:
+  CSV_FILE_PATH: "your_path/to_your.csv"
+  PHONE_SECTION:
+    ADD: TRUE # or FALSE
+    IGNORED_DEVICE_IDS: []
+  FITBIT_SECTION:
+    ADD: TRUE # or FALSE
+    IGNORED_DEVICE_IDS: []
+  EMPATICA_SECTION:
+    ADD: TRUE # or FALSE
+    IGNORED_DEVICE_IDS: []
+
+

Your CSV file ([CSV_FILE_PATH]) should have the following columns (headers), but the values within each column can be empty:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ColumnDescription
device_idPhone device id. Separate multiple ids with ;
fitbit_idFitbit device id. Separate multiple ids with ;
empatica_idEmpatica device id. Since the most common use case involves having various zip files from a single device for each person, set this device id to an arbitrary string (we usually use their pid)
pidUnique identifiers with the format pXXX (your participant files will be named with this string)
platformUse android, ios or infer as explained above, separate values with ;
labelA human-readable string that is used in reports and visualizations.
start_dateA string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00.
end_dateA string with format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS. By default, YYYY-MM-DD is interpreted as YYYY-MM-DD 00:00:00.
+
+

Example

+

We added white spaces to this example to make it easy to read, but you don’t have to.

+
device_id                                                                ,fitbit_id, empatica_id ,pid ,label ,platform    ,start_date ,end_date
+a748ee1a-1d0b-4ae9-9074-279a2b6ba524;dsadas-2324-fgsf-sdwr-gdfgs4rfsdf43 ,fitbit1  , p01         ,p01 ,julio ,android;ios ,2020-01-01 ,2021-01-01
+4c4cf7a1-0340-44bc-be0f-d5053bf7390c                                     ,fitbit2  , p02         ,p02 ,meng  ,ios         ,2021-01-01 ,2022-01-01
+
+
+

Then run

+
snakemake -j1 create_participants_files
+
+
+

Time Segments

+

Time segments (or epochs) are the time windows on which you want to extract behavioral features. For example, you might want to process data every day, every morning, or only during weekends. RAPIDS offers three categories of time segments that are flexible enough to cover most use cases: frequency (short time windows every day), periodic (arbitrary time windows on any day), and event (arbitrary time windows around events of interest). See also our examples.

+
+

These segments are computed every day, and all have the same duration (for example, 30 minutes). Set the following keys in your config.yaml

+
TIME_SEGMENTS: &time_segments
+  TYPE: FREQUENCY
+  FILE: "data/external/your_frequency_segments.csv"
+  INCLUDE_PAST_PERIODIC_SEGMENTS: FALSE
+
+

The file pointed by [TIME_SEGMENTS][FILE] should have the following format and only have 1 row.

+ + + + + + + + + + + + + + + + + +
ColumnDescription
labelA string that is used as a prefix in the name of your time segments
lengthAn integer representing the duration of your time segments in minutes
+
+

Example

+
label,length
+thirtyminutes,30
+
+

This configuration will compute 48 time segments for every day when any data from any participant was sensed. For example:

+
start_time,length,label
+00:00,30,thirtyminutes0000
+00:30,30,thirtyminutes0001
+01:00,30,thirtyminutes0002
+01:30,30,thirtyminutes0003
+...
+
+
+
+
+

These segments can be computed every day or on specific days of the week, month, quarter, and year. Their minimum duration is 1 minute, but they can be as long as you want. Set the following keys in your config.yaml.

+
TIME_SEGMENTS: &time_segments
+  TYPE: PERIODIC
+  FILE: "data/external/your_periodic_segments.csv"
+  INCLUDE_PAST_PERIODIC_SEGMENTS: FALSE # or TRUE
+
+

If [INCLUDE_PAST_PERIODIC_SEGMENTS] is set to TRUE, RAPIDS will consider instances of your segments back enough in the past to include the first row of data of each participant. For example, if the first row of data from a participant happened on Saturday, March 7th, 2020, and the requested segment duration is 7 days starting on every Sunday, the first segment to be considered would begin on Sunday, March 1st if [INCLUDE_PAST_PERIODIC_SEGMENTS] is TRUE or on Sunday, March 8th if FALSE.

+

The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ColumnDescription
labelA string that is used as a prefix in the name of your time segments. It has to be unique between rows
start_timeA string with format HH:MM:SS representing the starting time of this segment on any day
lengthA string representing the length of this segment. It can have one or more of the following strings XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S
repeats_onOne of the following options every_day, wday, qday, mday, and yday. The last four represent a week, quarter, month, and year day
repeats_valueAn integer complementing repeats_on. If you set repeats_on to every_day, set this to 0, otherwise 1-7 represent a wday starting from Mondays, 1-31 represent a mday, 1-91 represent a qday, and 1-366 represent a yday
+
+

Example

+
label,start_time,length,repeats_on,repeats_value
+daily,00:00:00,23H 59M 59S,every_day,0
+morning,06:00:00,5H 59M 59S,every_day,0
+afternoon,12:00:00,5H 59M 59S,every_day,0
+evening,18:00:00,5H 59M 59S,every_day,0
+night,00:00:00,5H 59M 59S,every_day,0
+
+

This configuration will create five segment instances (daily, morning, afternoon, evening, night) on any given day (every_day set to 0). The daily segment will start at midnight and last 23:59:59; the other four segments will begin at 6am, 12pm, 6pm, and 12am, respectively, and last for 05:59:59.

+
+
+
+

These segments can be computed before or after an event of interest (defined as any UNIX timestamp). Their minimum duration is 1 minute, but they can be as long as you want. The start of each segment can be shifted backward or forwards from the specified timestamp. Set the following keys in your config.yaml.

+
TIME_SEGMENTS: &time_segments
+  TYPE: EVENT
+  FILE: "data/external/your_event_segments.csv"
+  INCLUDE_PAST_PERIODIC_SEGMENTS: FALSE # or TRUE
+
+

The file pointed by [TIME_SEGMENTS][FILE] should have the following format and can have multiple rows.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ColumnDescription
labelA string that is used as a prefix in the name of your time segments. If labels are unique, every segment is independent; if two or more segments have the same label, their data will be grouped when computing auxiliary data for features like the most frequent contact for calls (the most frequent contact will be calculated across all these segments). There cannot be two overlapping event segments with the same label (RAPIDS will throw an error)
event_timestampA UNIX timestamp that represents the moment an event of interest happened (clinical relapse, survey, readmission, etc.). The corresponding time segment will be computed around this moment using length, shift, and shift_direction
lengthA string representing the length of this segment. It can have one or more of the following keys XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S
shiftA string representing the time shift from event_timestamp. It can have one or more of the following keys XXD XXH XXM XXS to represent days, hours, minutes, and seconds. For example, 7D 23H 59M 59S. Use this value to change the start of a segment with respect to its event_timestamp. For example, set this variable to 1H to create a segment that starts 1 hour from an event of interest (shift_direction determines if it’s before or after).
shift_directionAn integer representing whether the shift is before (-1) or after (1) an event_timestamp
device_idThe device id (smartphone or Fitbit) to whom this segment belongs to. You have to create a line in this event segment file for each event of a participant that you want to analyze. If you have participants with multiple device ids, you can choose any of them
+
+

Example

+
label,event_timestamp,length,shift,shift_direction,device_id
+stress1,1587661220000,1H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+stress2,1587747620000,4H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+stress3,1587906020000,3H,5M,1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+stress4,1584291600000,7H,4H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+stress5,1588172420000,9H,5M,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+mood,1587661220000,1H,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+mood,1587747620000,1D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+mood,1587906020000,7D,0,0,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+
+

This example will create eight segments for a single participant (a748ee1a...), five independent stressX segments with various lengths (1,4,3,7, and 9 hours). Segments stress1, stress3, and stress5 are shifted forwards by 5 minutes, and stress2 and stress4 are shifted backward by 4 hours (that is, if the stress4 event happened on March 15th at 1pm EST (1584291600000), the time segment will start on that day at 9am and end at 4pm).

+

The three mood segments are 1 hour, 1 day, and 7 days long and have no shift. In addition, these mood segments are grouped together, meaning that although RAPIDS will compute features on each one of them, some information for such computation will be extracted from all three segments, for example, the phone contact that called a participant the most, or the location clusters visited by a participant.

+
+
Date time labels of event segments

In the final feature file, you will find a row per event segment. The local_segment column of each row has a label, a start date-time string, and an end date-time string.

+
weeklysurvey2060#2020-09-12 01:00:00,2020-09-18 23:59:59
+
+

All sensor data is always segmented based on timestamps, and the date-time strings are attached for informative purposes. For example, you can plot your features based on these strings.

+

When you configure RAPIDS to work with a single time zone, such time zone code will be used to convert start/end timestamps (the ones you typed in the event segments file) into start/end date-time strings. However, when you configure RAPIDS to work with multiple time zones, RAPIDS will use the most common time zone across all devices of every participant to do the conversion. The most common time zone is the one in which a participant spent the most time.

+

In practical terms, this means that the date-time strings of event segments that happened in uncommon time zones will have shifted start/end date-time labels. However, the data within each segment was correctly filtered based on timestamps.

+
+
+
+

Segment Examples

+
+

Use the following Frequency segment file to create 288 (12 * 60 * 24) 5-minute segments starting from midnight of every day in your study +

label,length
+fiveminutes,5
+

+
+
+

Use the following Periodic segment file to create daily segments starting from midnight of every day in your study +

label,start_time,length,repeats_on,repeats_value
+daily,00:00:00,23H 59M 59S,every_day,0
+

+
+
+

Use the following Periodic segment file to create morning segments starting at 06:00:00 and ending at 11:59:59 of every day in your study +

label,start_time,length,repeats_on,repeats_value
+morning,06:00:00,5H 59M 59S,every_day,0
+

+
+
+

Use the following Periodic segment file to create overnight segments starting at 20:00:00 and ending at 07:59:59 (next day) of every day in your study +

label,start_time,length,repeats_on,repeats_value
+overnight,20:00:00,11H 59M 59S,every_day,0
+

+
+
+

Use the following Periodic segment file to create non-overlapping weekly segments starting at midnight of every Monday in your study +

label,start_time,length,repeats_on,repeats_value
+weekly,00:00:00,6D 23H 59M 59S,wday,1
+
+Use the following Periodic segment file to create overlapping weekly segments starting at midnight of every day in your study +
label,start_time,length,repeats_on,repeats_value
+weekly,00:00:00,6D 23H 59M 59S,every_day,0
+

+
+
+

Use the following Periodic segment file to create week-end segments starting at midnight of every Saturday in your study +

label,start_time,length,repeats_on,repeats_value
+weekend,00:00:00,1D 23H 59M 59S,wday,6
+

+
+
+

Use the following Event segment file to create two 2-hour segments that start 1 hour before surveys answered by 3 participants +

label,event_timestamp,length,shift,shift_direction,device_id
+survey1,1587661220000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+survey2,1587747620000,2H,1H,-1,a748ee1a-1d0b-4ae9-9074-279a2b6ba524
+survey1,1587906020000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr
+survey2,1584291600000,2H,1H,-1,rqtertsd-43ff-34fr-3eeg-efe4fergregr
+survey1,1588172420000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3
+survey2,1584291600000,2H,1H,-1,klj34oi2-8frk-2343-21kk-324ljklewlr3
+

+
+
+
+

Timezone of your study

+

Single timezone

+

If your study only happened in a single time zone or you want to ignore short trips of your participants to different time zones, select the appropriate code from this list and change the following config key. Double-check your timezone code pick; for example, US Eastern Time is America/New_York, not EST.

+
TIMEZONE: 
+    TYPE: SINGLE
+    TZCODE: America/New_York
+
+

Multiple timezones

+

If your participants lived in different time zones or they traveled across time zones, and you know when participants’ devices were in a specific time zone, RAPIDS can use this data to process your data streams with the correct date-time. You need to provide RAPIDS with the time zone data in a CSV file ([TZCODES_FILE]) in the format described below.

+
TIMEZONE: 
+    TYPE: MULTIPLE
+    SINGLE:
+      TZCODE: America/New_York
+    MULTIPLE:
+      TZCODES_FILE: path_to/time_zones_csv.file
+      IF_MISSING_TZCODE: STOP
+      DEFAULT_TZCODE: America/New_York
+      FITBIT: 
+        ALLOW_MULTIPLE_TZ_PER_DEVICE: False
+        INFER_FROM_SMARTPHONE_TZ: False
+
+

Parameters for [TIMEZONE]

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter                                                           Description
[TYPE]Either SINGLE or MULTIPLE as explained above
[SINGLE][TZCODE]The time zone code from this list to be used across all devices
[MULTIPLE][TZCODES_FILE]A CSV file containing the time zones in which participants’ devices sensed data (see the required format below). Multiple devices can be linked to the same person. Read more in Participants Files
[MULTIPLE][IF_MISSING_TZCODE]When a device is missing from [TZCODES_FILE] Set this flag to STOP to stop RAPIDS execution and show an error, or to USE_DEFAULT to assign the time zone specified in [DEFAULT_TZCODE] to any such devices
[MULTIPLE][FITBIT][ALLOW_MULTIPLE_TZ_PER_DEVICE]You only need to care about this flag if one or more Fitbit devices sensed data in one or more time zones, and you want RAPIDS to take into account this in its feature computation. Read more in “How does RAPIDS handle Fitbit devices?” below.
[MULTIPLE][FITBIT][INFER_FROM_SMARTPHONE_TZ]You only need to care about this flag if one or more Fitbit devices sensed data in one or more time zones, and you want RAPIDS to take into account this in its feature computation. Read more in “How does RAPIDS handle Fitbit devices?” below.
+
Format of TZCODES_FILE

TZCODES_FILE has three columns and a row for each time zone a device visited (a device can be a smartphone or wearable (Fitbit/Empatica)):

+ + + + + + + + + + + + + + + + + + + + + +
ColumnDescription
device_idA string that uniquely identifies a smartphone or wearable
tzcodeA string with the appropriate code from this list that represents the time zone where the device sensed data
timestampA UNIX timestamp indicating when was the first time this device_id sensed data in tzcode
+
device_id,                            tzcode,              timestamp
+13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/New_York,     1587500000000
+13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Mexico_City,  1587600000000
+13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Los_Angeles,  1587700000000
+65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam,     1587100000000
+65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Berlin,        1587200000000
+65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam,     1587300000000
+
+

Using this file, RAPIDS will create time zone intervals per device, for example for 13dbc8a3-dae3-4834-823a-4bc96a7d459d:

+
    +
  • Interval 1 [1587500000000, 1587599999999] for America/New_York
  • +
  • Interval 2 [1587600000000, 1587699999999] for America/Mexico_City
  • +
  • Interval 3 [1587700000000, now] for America/Los_Angeles
  • +
+

Any sensor data row from a device will be assigned a timezone if it falls within that interval, for example:

+
    +
  • A screen row sensed at 1587533333333 will be assigned to America/New_York because it falls within Interval 1
  • +
  • A screen row sensed at 1587400000000 will be discarded because it was logged outside any interval.
  • +
+
+
Can I get the TZCODES_FILE from the time zone table collected automatically by the AWARE app?

Sure. You can put your timezone table (timezone.csv) collected by the AWARE app under data/external folder and run: +

python tools/create_multi_timezones_file.py
+
+The TZCODES_FILE will be saved as data/external/multiple_timezones.csv.

+
+
What happens if participant X lives in Los Angeles but participant Y lives in Amsterdam and they both stayed there during my study?

Add a row per participant and set timestamp to 0: +

device_id,                            tzcode,              timestamp
+13dbc8a3-dae3-4834-823a-4bc96a7d459d, America/Los_Angeles,  0
+65sa66a5-2d2d-4524-946v-44ascbv4sad7, Europe/Amsterdam,     0
+

+
+
What happens if I forget to add a timezone for one or more devices?

It depends on [IF_MISSING_TZCODE].

+

If [IF_MISSING_TZCODE] is set to STOP, RAPIDS will stop its execution and show you an error message.

+

If [IF_MISSING_TZCODE] is set to USE_DEFAULT, it will assign the time zone specified in [DEFAULT_TZCODE] to any devices with missing time zone information in [TZCODES_FILE]. This is helpful if only a few of your participants had multiple timezones, and you don’t want to specify the same time zone for the rest.

+
+
How does RAPIDS handle Fitbit devices?

Fitbit devices are not time zone aware, and they always log data with a local date-time string.

+
    +
  • +

    When none of the Fitbit devices in your study changed time zones (e.g., p01 was always in New York and p02 was always in Amsterdam), you can set a single time zone per Fitbit device id along with a timestamp of 0 (you can still assign multiple time zones to smartphone device ids) +

    device_id, tzcode,              timestamp
    +fitbit123, America/New_York,     0
    +fitbit999, Europe/Amsterdam,     0
    +

    +
  • +
  • +

    On the other hand, when at least one of your Fitbit devices changed time zones AND you want RAPIDS to take into account these changes, you need to set [ALLOW_MULTIPLE_TZ_PER_DEVICE] to True. You have to manually allow this option because you need to be aware it can produce inaccurate features around the times when time zones changed. This is because we cannot know precisely when the Fitbit device detected and processed the time zone change.

    +

    If you want to ALLOW_MULTIPLE_TZ_PER_DEVICE, you will need to add any time zone changes per device in the TZCODES_FILE as explained above. You could obtain this data by hand, but if your participants also used a smartphone during your study, you can use their time zone logs. Recall that in RAPIDS, every participant is represented with a participant file pXX.yaml; this file links together multiple devices, and we will use it to know what smartphone time zone data should be applied to Fitbit devices. Thus, set INFER_FROM_SMARTPHONE_TZ to TRUE if you have included smartphone time zone data in your TZCODES_FILE and want to make a participant’s Fitbit data time zone aware with their respective smartphone data.

    +
  • +
+
+
+

Data Stream Configuration

+

Modify the following keys in your config.yaml depending on the data stream you want to process.

+
+

Set [PHONE_DATA_STREAMS][USE] to the smartphone data stream you want to process (e.g. aware_mysql) and configure its parameters (e.g. [DATABASE_GROUP]). Ignore the parameters of streams you are not using (e.g. [FOLDER] of aware_csv).

+
PHONE_DATA_STREAMS:
+  USE: aware_mysql
+
+  # AVAILABLE:
+  aware_mysql:
+    DATABASE_GROUP: MY_GROUP
+
+  aware_csv:
+    FOLDER: data/external/aware_csv
+
+
+ + + + + + + + + + + + + +
KeyDescription
[DATABASE_GROUP]A database credentials group. Read the instructions below to set it up
+
Setting up a DATABASE_GROUP and its connection credentials.
    +
  1. +

    If you haven’t done so, create an empty file called credentials.yaml in your RAPIDS root directory:

    +
  2. +
  3. +

    Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database):

    +
    MY_GROUP:
    +  database: MY_DATABASE
    +  host: MY_HOST
    +  password: MY_PASSWORD
    +  port: 3306
    +  user: MY_USER
    +
    +
  4. +
  5. +

    Notes

    +
      +
    1. +

      The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use.

      +
    2. +
    3. +

      Indentation matters

      +
    4. +
    5. +

      You can have more than one credentials group in credentials.yaml

      +
    6. +
    +
  6. +
+
Upgrading from ./.env from RAPIDS 0.x

In RAPIDS versions 0.x, database credentials were stored in a ./.env file. If you are migrating from that type of file, you have two options:

+
    +
  1. +

    Migrate your credentials by hand:

    +
    +
    [MY_GROUP]
    +user=MY_USER
    +password=MY_PASSWORD
    +host=MY_HOST
    +port=3306
    +database=MY_DATABASE
    +
    +
    +
    +
    MY_GROUP:
    +  user: MY_USER
    +  password: MY_PASSWORD
    +  host: MY_HOST
    +  port: 3306
    +  database: MY_DATABASE
    +
    +
    +
    +
  2. +
  3. +

    Use the migration script we provide (make sure your conda environment is active):

    +
    python tools/update_format_env.py
    +
    +
  4. +
+
+
Connecting to localhost (host machine) from inside our docker container.

If you are using RAPIDS’ docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost. In a Linux host, you need to run our docker container using docker run --network="host" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine.

+
+
+
+
+ + + + + + + + + + + + + +
KeyDescription
[FOLDER]Folder where you have to place a CSV file per phone sensor. Each file has to contain all the data from every participant you want to process.
+
+
+
+
+

Set [FITBIT_DATA_STREAMS][USE] to the Fitbit data stream you want to process (e.g. fitbitjson_mysql) and configure its parameters (e.g. [DATABASE_GROUP]). Ignore the parameters of the other streams you are not using (e.g. [FOLDER] of aware_csv).

+
+

Warning

+

You will probably have to tell RAPIDS the name of the columns where you stored your Fitbit data. To do this, modify your chosen stream’s format.yaml column mappings to match your raw data column names.

+
+
FITBIT_DATA_STREAMS:
+  USE: fitbitjson_mysql
+
+  # AVAILABLE:
+  fitbitjson_mysql:
+    DATABASE_GROUP: MY_GROUP
+    SLEEP_SUMMARY_LAST_NIGHT_END: 660
+
+  fitbitjson_csv:
+    FOLDER: data/external/fitbit_csv
+    SLEEP_SUMMARY_LAST_NIGHT_END: 660
+
+  fitbitparsed_mysql:
+    DATABASE_GROUP: MY_GROUP
+    SLEEP_SUMMARY_LAST_NIGHT_END: 660
+
+  fitbitparsed_csv:
+    FOLDER: data/external/fitbit_csv
+    SLEEP_SUMMARY_LAST_NIGHT_END: 660
+
+
+

This data stream processes Fitbit data inside a JSON column obtained from the Fitbit API and stored in a MySQL database. Read more about its column mappings and mutations in fitbitjson_mysql.

+ + + + + + + + + + + + + + + + + +
KeyDescription
[DATABASE_GROUP]A database credentials group. Read the instructions below to set it up
[SLEEP_SUMMARY_LAST_NIGHT_END]Segments are assigned based on this parameter. Any sleep episodes that start between today’s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow’s LNE are regarded as today’s sleep episodes. While today’s bedtime is based on today’s sleep episodes, today’s wake time is based on yesterday’s sleep episodes.
+
Setting up a DATABASE_GROUP and its connection credentials.
    +
  1. +

    If you haven’t done so, create an empty file called credentials.yaml in your RAPIDS root directory:

    +
  2. +
  3. +

    Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database):

    +
    MY_GROUP:
    +  database: MY_DATABASE
    +  host: MY_HOST
    +  password: MY_PASSWORD
    +  port: 3306
    +  user: MY_USER
    +
    +
  4. +
  5. +

    Notes

    +
      +
    1. +

      The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use.

      +
    2. +
    3. +

      Indentation matters

      +
    4. +
    5. +

      You can have more than one credentials group in credentials.yaml

      +
    6. +
    +
  6. +
+
Upgrading from ./.env from RAPIDS 0.x

In RAPIDS versions 0.x, database credentials were stored in a ./.env file. If you are migrating from that type of file, you have two options:

+
    +
  1. +

    Migrate your credentials by hand:

    +
    +
    [MY_GROUP]
    +user=MY_USER
    +password=MY_PASSWORD
    +host=MY_HOST
    +port=3306
    +database=MY_DATABASE
    +
    +
    +
    +
    MY_GROUP:
    +  user: MY_USER
    +  password: MY_PASSWORD
    +  host: MY_HOST
    +  port: 3306
    +  database: MY_DATABASE
    +
    +
    +
    +
  2. +
  3. +

    Use the migration script we provide (make sure your conda environment is active):

    +
    python tools/update_format_env.py
    +
    +
  4. +
+
+
Connecting to localhost (host machine) from inside our docker container.

If you are using RAPIDS’ docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost. In a Linux host, you need to run our docker container using docker run --network="host" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine.

+
+
+
+
+

This data stream processes Fitbit data inside a JSON column obtained from the Fitbit API and stored in a CSV file. Read more about its column mappings and mutations in fitbitjson_csv.

+ + + + + + + + + + + + + + + + + +
KeyDescription
[FOLDER]Folder where you have to place a CSV file per Fitbit sensor. Each file has to contain all the data from every participant you want to process.
[SLEEP_SUMMARY_LAST_NIGHT_END]Segments are assigned based on this parameter. Any sleep episodes that start between today’s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow’s LNE are regarded as today’s sleep episodes. While today’s bedtime is based on today’s sleep episodes, today’s wake time is based on yesterday’s sleep episodes.
+
+
+

This data stream processes Fitbit data stored in multiple columns after being parsed from the JSON column returned by the Fitbit API and stored in a MySQL database. Read more about its column mappings and mutations in fitbitparsed_mysql.

+ + + + + + + + + + + + + + + + + +
KeyDescription
[DATABASE_GROUP]A database credentials group. Read the instructions below to set it up
[SLEEP_SUMMARY_LAST_NIGHT_END]Segments are assigned based on this parameter. Any sleep episodes that start between today’s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow’s LNE are regarded as today’s sleep episodes. While today’s bedtime is based on today’s sleep episodes, today’s wake time is based on yesterday’s sleep episodes.
+
Setting up a DATABASE_GROUP and its connection credentials.
    +
  1. +

    If you haven’t done so, create an empty file called credentials.yaml in your RAPIDS root directory:

    +
  2. +
  3. +

    Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database):

    +
    MY_GROUP:
    +  database: MY_DATABASE
    +  host: MY_HOST
    +  password: MY_PASSWORD
    +  port: 3306
    +  user: MY_USER
    +
    +
  4. +
  5. +

    Notes

    +
      +
    1. +

      The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use.

      +
    2. +
    3. +

      Indentation matters

      +
    4. +
    5. +

      You can have more than one credentials group in credentials.yaml

      +
    6. +
    +
  6. +
+
Upgrading from ./.env from RAPIDS 0.x

In RAPIDS versions 0.x, database credentials were stored in a ./.env file. If you are migrating from that type of file, you have two options:

+
    +
  1. +

    Migrate your credentials by hand:

    +
    +
    [MY_GROUP]
    +user=MY_USER
    +password=MY_PASSWORD
    +host=MY_HOST
    +port=3306
    +database=MY_DATABASE
    +
    +
    +
    +
    MY_GROUP:
    +  user: MY_USER
    +  password: MY_PASSWORD
    +  host: MY_HOST
    +  port: 3306
    +  database: MY_DATABASE
    +
    +
    +
    +
  2. +
  3. +

    Use the migration script we provide (make sure your conda environment is active):

    +
    python tools/update_format_env.py
    +
    +
  4. +
+
+
Connecting to localhost (host machine) from inside our docker container.

If you are using RAPIDS’ docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost. In a Linux host, you need to run our docker container using docker run --network="host" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine.

+
+
+
+
+

This data stream processes Fitbit data stored in multiple columns (plain text) after being parsed from the JSON column returned by the Fitbit API and stored in a CSV file. Read more about its column mappings and mutations in fitbitparsed_csv.

+ + + + + + + + + + + + + + + + + +
KeyDescription
[FOLDER]Folder where you have to place a CSV file per Fitbit sensor. Each file has to contain all the data from every participant you want to process.
[SLEEP_SUMMARY_LAST_NIGHT_END]Segments are assigned based on this parameter. Any sleep episodes that start between today’s SLEEP_SUMMARY_LAST_NIGHT_END (LNE) and tomorrow’s LNE are regarded as today’s sleep episodes. While today’s bedtime is based on today’s sleep episodes, today’s wake time is based on yesterday’s sleep episodes.
+
+
+
+
+

Set [USE] to the Empatica data stream you want to use; see the table in introduction to data streams. Configure any parameters as indicated below.

+
EMPATICA_DATA_STREAMS:
+  USE: empatica_zip
+
+  # AVAILABLE:
+  empatica_zip: 
+    FOLDER: data/external/empatica
+
+
+ + + + + + + + + + + + + +
KeyDescription
[FOLDER]The relative path to a folder containing one subfolder per participant. The name of a participant folder should match their device_id assigned in their participant file. Each participant folder can have one or more zip files with any name; in other words, the sensor data in those zip files belong to a single participant. The zip files are automatically generated by Empatica and have a CSV file per sensor (ACC, HR, TEMP, EDA, BVP, TAGS). All CSV files of the same type contained in one or more zip files are uncompressed, parsed, sorted by timestamp, and joined together.
+
Example of an EMPATICA FOLDER

In the file tree below, we want to process three participants’ data: p01, p02, and p03. p01 has two zip files, p02 has only one zip file, and p03 has three zip files. Each zip has a CSV file per sensor that is joined together and processed by RAPIDS.

+
data/ # this folder exists in the root RAPIDS folder
+  external/
+    empatica/
+      p01/
+        file1.zip
+        file2.zip
+      p02/
+        aaaa.zip
+      p03/
+        t1.zip
+        t2.zip
+        t3.zip
+
+
+
+
+
+
+
+

Sensor and Features to Process

+

Finally, you need to modify the config.yaml section of the sensors you want to extract behavioral features from. All sensors follow the same naming nomenclature (DEVICE_SENSOR) and parameter structure which we explain in the Behavioral Features Introduction.

+
+

Done

+

Head over to Execution to learn how to execute RAPIDS.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/setup/execution/index.html b/1.3/setup/execution/index.html new file mode 100644 index 00000000..9f4b50d5 --- /dev/null +++ b/1.3/setup/execution/index.html @@ -0,0 +1,1915 @@ + + + + + + + + + + + + + + + + + + + + + + Execution - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Execution

+

After you have installed and configured RAPIDS, use the following command to execute it.

+
./rapids -j1
+
+
+

Ready to extract behavioral features

+

If you are ready to extract features head over to the Behavioral Features Introduction

+
+
+

We wrap Snakemake

+

The script ./rapids is a wrapper around Snakemake so you can pass any parameters that Snakemake accepts (e.g. -j1).

+
+
+

Updating RAPIDS output after modifying config.yaml

+

Any changes to the config.yaml file will be applied automatically and only the relevant files will be updated. This means that after modifying the features list for PHONE_MESSAGES, for example, RAPIDS will execute the script that computes message features and update its output file.

+
+
+

Multi-core

+

You can run RAPIDS over multiple cores by modifying the -j argument (e.g. use -j8 to use 8 cores). However, take into account that this means multiple sensor datasets for different participants will be loaded in memory at the same time. If RAPIDS crashes because it ran out of memory, reduce the number of cores and try again.

+

As reference, we have run RAPIDS over 12 cores and 32 Gb of RAM without problems for a study with 200 participants with 14 days of low-frequency smartphone data (no accelerometer, gyroscope, or magnetometer).

+
+
+

Deleting RAPIDS output

+

If you want to delete all the output files RAPIDS produces, you can execute the following command:

+
./rapids -j1 --delete-all-output
+
+
+
+

Forcing a complete rerun or updating your raw data in RAPIDS

+

If you want to update your raw data or rerun the whole pipeline from scratch, run the following commands:

+
./rapids -j1 --delete-all-output
+./rapids -j1
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/setup/installation/index.html b/1.3/setup/installation/index.html new file mode 100644 index 00000000..d978c046 --- /dev/null +++ b/1.3/setup/installation/index.html @@ -0,0 +1,2115 @@ + + + + + + + + + + + + + + + + + + + + + + Installation - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Installation

+

You can install RAPIDS using Docker (the fastest), or native instructions for MacOS and Linux (Ubuntu). Windows is supported through Docker or WSL.

+
+
    +
  1. +

    Install Docker

    +
  2. +
  3. +

    Pull our RAPIDS container +

    docker pull moshiresearch/rapids:latest
    +

    +
  4. +
  5. +

    Run RAPIDS' container (after this step is done you should see a + prompt in the main RAPIDS folder with its python environment active)

    +
    docker run -it moshiresearch/rapids:latest
    +
    +
  6. +
  7. +

    Pull the latest version of RAPIDS

    +
    git pull
    +
    +
  8. +
  9. +

    Make RAPIDS script executable +

    chmod +x rapids
    +

    +
  10. +
  11. +

    Check that RAPIDS is working +

    ./rapids -j1
    +

    +
  12. +
  13. +

    Optional. You can edit RAPIDS files with vim but we recommend using Visual Studio Code and its Remote Containers extension

    +
    How to configure Remote Containers extension
      +
    • Make sure RAPIDS container is running
    • +
    • Install the Remote - Containers extension
    • +
    • Go to the Remote Explorer panel on the left hand sidebar
    • +
    • On the top right dropdown menu choose Containers
    • +
    • Double click on the moshiresearch/rapids container in the CONTAINERS tree
    • +
    • A new VS Code session should open on RAPIDS main folder inside the container.
    • +
    +
    +
  14. +
+
+

Warning

+

If you installed RAPIDS using Docker for Windows on Windows 10, the container will have limits on the amount of RAM it can use. If you find that RAPIDS crashes due to running out of memory, increase this limit.

+
+
+
+

We tested these instructions in Catalina and Big Sur

+
M1 Macs

RAPIDS can run on M1 Macs, the only changes as of Feb 21, 2021 are:

+
    +
  • R needs to be installed via brew under Rosetta (x86 arch) due to incompatibility issues with some R libraries. To do this, run your terminal via Rosetta, then proceed with the usual brew installation command. Use x86 brew to install R and restore RAPIDS’ packages (snakemake -j1 renv_install & snakemake -j1 renv_restore).
  • +
  • There is a bug related to timezone codes. We set the correct TZ_DIR in renv/activate.R (line #19) Sys.setenv("TZDIR" = file.path(R.home(), "share", "zoneinfo")) (RAPIDS does this automatically).
  • +
+
+
    +
  1. +

    Install brew

    +
  2. +
  3. +

    Install MySQL

    +
    brew install mysql
    +brew services start mysql
    +
    +
  4. +
  5. +

    Install R 4.0, pandoc and rmarkdown. If you have other instances of R, we recommend uninstalling them

    +
    brew install r
    +brew install pandoc
    +Rscript --vanilla -e 'install.packages("rmarkdown", repos="http://cran.us.r-project.org")'
    +
    +
  6. +
  7. +

    Install miniconda (restart your terminal afterwards)

    +
    brew cask install miniconda
    +conda init zsh # (or conda init bash)
    +
    +
  8. +
  9. +

    Clone our repo

    +
    git clone https://github.com/carissalow/rapids
    +
    +
  10. +
  11. +

    Create a python virtual environment

    +
    cd rapids
    +conda env create -f environment.yml -n rapids
    +conda activate rapids
    +
    +
  12. +
  13. +

    Install R packages and virtual environment:

    +
    snakemake -j1 renv_install
    +snakemake -j1 renv_restore
    +
    +
    +

    Note

    +

    This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion.

    +
    +
  14. +
  15. +

    Make RAPIDS script executable +

    chmod +x rapids
    +

    +
  16. +
  17. +

    Check that RAPIDS is working +

    ./rapids -j1
    +

    +
  18. +
+
+
+

We tested RAPIDS on Ubuntu 18.04 & 20.04. Note that the necessary Python and R packages are available in other Linux distributions, so if you decide to give it a try, let us know and we can update these docs.

+
    +
  1. +

    Install dependencies

    +
    sudo apt install libcurl4-openssl-dev
    +sudo apt install libssl-dev
    +sudo apt install libxml2-dev
    +sudo apt install libglpk40
    +
    +
  2. +
  3. +

    Install MySQL

    +
    sudo apt install libmysqlclient-dev
    +sudo apt install mysql-server
    +
    +
  4. +
  5. +

    Add key for R’s repository.

    +
    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9
    +
    +
  6. +
  7. +

    Add R’s repository

    +
    +
    sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/'
    +
    +
    +
    +
    sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/'
    +
    +
    +
    +
  8. +
  9. +

    Install R 4.0. If you have other instances of R, we recommend uninstalling them

    +
    sudo apt update
    +sudo apt install r-base
    +
    +
  10. +
  11. +

    Install Pandoc and rmarkdown

    +
    sudo apt install pandoc
    +Rscript --vanilla -e 'install.packages("rmarkdown", repos="http://cran.us.r-project.org")'
    +
    +
  12. +
  13. +

    Install git

    +
    sudo apt install git
    +
    +
  14. +
  15. +

    Install miniconda

    +
  16. +
  17. +

    Restart your current shell

    +
  18. +
  19. +

    Clone our repo:

    +
    git clone https://github.com/carissalow/rapids
    +
    +
  20. +
  21. +

    Create a python virtual environment:

    +
    cd rapids
    +conda env create -f environment.yml -n MY_ENV_NAME
    +conda activate MY_ENV_NAME
    +
    +
  22. +
  23. +

    Install the R virtual environment management package (renv)

    +
    snakemake -j1 renv_install
    +
    +
  24. +
  25. +

    Restore the R virtual environment

    +
    +

    Run the following command to restore the R virtual environment using RSPM binaries +

    R -e 'renv::restore(repos = c(CRAN = "https://packagemanager.rstudio.com/all/__linux__/bionic/latest"))'
    +

    +
    +
    +

    Run the following command to restore the R virtual environment using RSPM binaries +

    R -e 'renv::restore(repos = c(CRAN = "https://packagemanager.rstudio.com/all/__linux__/focal/latest"))'
    +

    +
    +
    +

    If the fast installation command failed for some reason, you can restore the R virtual environment from source: +

    R -e 'renv::restore()'
    +

    +
    +
    +
    +

    Note

    +

    This step could take several minutes to complete, especially if you have less than 3Gb of RAM or packages need to be compiled from source. Please be patient and let it run until completion.

    +
    +
  26. +
  27. +

    Make RAPIDS script executable +

    chmod +x rapids
    +

    +
  28. +
  29. +

    Check that RAPIDS is working +

    ./rapids -j1
    +

    +
  30. +
+
+
+

There are several options varying in complexity:

+
    +
  • You can use our Docker instructions (tested)
  • +
  • You can use our Ubuntu 20.04 instructions on WSL2 (not tested but it will likely work)
  • +
  • Native installation (experimental). If you would like to contribute to RAPIDS you could try to install MySQL, miniconda, Python, and R 4.0+ in Windows and restore the Python and R virtual environments using steps 6 and 7 of the instructions for Mac. You can get in touch if you would like to discuss this with the team.
  • +
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/setup/overview/index.html b/1.3/setup/overview/index.html new file mode 100644 index 00000000..0d06da05 --- /dev/null +++ b/1.3/setup/overview/index.html @@ -0,0 +1,2135 @@ + + + + + + + + + + + + + + + + + + + + + + Overview - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Overview

+

Let’s review some key concepts we use throughout these docs:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Definition                Description
DeviceA mobile or wearable device, like smartphones, Fitbit wrist bands, Oura Rings, etc.
SensorA physical or digital module builtin in a device that produces a data stream. For example, a smartphone’s accelerometer or screen.
Data StreamSet of sensor data collected using a specific device with a particular format and stored in a specific container. For example, smartphone (device) data collected with the AWARE Framework (format) and stored in a MySQL database (container).
Data Stream FormatSensor data produced by a data stream have columns with specific names and types. RAPIDS can process a data stream using a format.yaml file that describes the raw data columns and any necessary transformations.
Data Stream ContainerSensor data produced by a data stream can be stored in a database, electronic files, or arbitrary electronic containers. RAPIDS can pull (download) the data from a stream using a container script implemented in R or Python.
ParticipantA person that took part in a monitoring study
Behavioral featureA metric computed from raw sensor data quantifying the behavior of a participant. For example, time spent at home calculated from location data. These are also known as digital biomarkers
Time segmentTime segments (or epochs) are the time windows on which RAPIDS extracts behavioral features. For example, you might want to compute participants’ time at home every morning or only during weekends. You define time segments in a CSV file that RAPIDS processes.
Time zoneA string like America/New_York that represents a time zone where a device logged data. You can process data collected in single or multiple time zones for every participant.
Feature ProviderA script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, and are named after their first author, like [PHONE_LOCATIONS][DORYAB].
config.yamlA YAML file where you can modify parameters to process data streams and behavioral features. This is the heart of RAPIDS and the file that you will change the most.
credentials.yamlA YAML file where you can define credential groups (user, password, host, etc.) if your data stream needs to connect to a database or Web API
Participant file(s)A YAML file that links one or more smartphone or wearable devices used by a single participant. RAPIDS needs one file per participant.
+
+

What can I do with RAPIDS?

+ +
+
+

Hint

+ +
+

Frequently Asked Questions

+

General

+
What exactly is RAPIDS?

RAPIDS is a group of configuration files and R and Python scripts executed by Snakemake. You can get a copy of RAPIDS by cloning our Github repository.

+

RAPIDS is not a web application or server; all the processing is done in your laptop, server, or computer cluster.

+
+
How does RAPIDS work?

Most of the time, you will only have to modify configuration files in YAML format (config.yaml, credentials.yaml, and participant files pxx.yaml), and in CSV format (time zones and time segments).

+

RAPIDS pulls data from different data containers and processes it in steps. The input/output of each stage is saved as a CSV file for inspection; you can check the files created for each sensor on its documentation page.

+

All data is stored in data/, and all processing Python and R scripts are stored in src/.

+
User and File interactions in RAPIDS

In the figure below, we represent the interactions between users and files. After a user modifies the configuration files mentioned above, the Snakefile file will search for and execute the Snakemake rules that contain the Python or R scripts necessary to generate or update the required output files (behavioral features, plots, etc.).

+

+ +
Interaction diagram between the user, and important files in RAPIDS
+

+
+
Data flow in RAPIDS

In the figure below, we represent the flow of data in RAPIDS. In broad terms, smartphone and wearable devices log data streams with a certain format to a data container (database, file, etc.).

+

RAPIDS can connect to these containers if it has a format.yaml and a container.[R|py] script used to pull the correct data and mutate it to comply with RAPIDS’ internal data representation. Once the data stream is in RAPIDS, it goes through some basic transformations (scripts), one that assigns a time segment and a time zone to each data row, and another one that creates “episodes” of data for some sensors that need it (like screen, battery, activity recognition, and sleep intraday data).

+

After this, RAPIDS executes the requested PROVIDER script that computes behavioral features per time segment instance. After every feature is computed, they are joined per sensor, per participant, and study. Visualizations are built based on raw data or based on calculated features.

+

+ +
Data stream flow in RAPIDS
+

+
+
+
Is my data private?

Absolutely, you are processing your data with your own copy of RAPIDS in your laptop, server, or computer cluster, so neither we nor anyone else can access your datasets.

+
+
Do I need to have coding skills to use RAPIDS?

If you want to extract the behavioral features or visualizations that RAPIDS offers out of the box, the answer is no. However, you need to be comfortable running commands in your terminal and familiar with editing YAML files and CSV files.

+

If you want to add support for new data streams or behavioral features, you need to be familiar with R or Python.

+
+
Is RAPIDS open-source or free?

Yes, RAPIDS is both open-source and free.

+
+
How do I cite RAPIDS?

Please refer to our Citation guide; depending on what parts of RAPIDS you used, we also ask you to cite the work of other authors that shared their work.

+
+
I have a lot of data, can RAPIDS handle it/ is RAPIDS fast enough?

Yes, we use Snakemake under the hood, so you can automatically distribute RAPIDS execution over multiple cores or clusters. RAPIDS processes data per sensor and participant, so it can take advantage of this parallel processing.

+
+
What are the advantages of using RAPIDS over implementing my own analysis code?

We believe RAPIDS can benefit your analysis in several ways:

+
    +
  • RAPIDS has more than 250 behavioral features available, many of them tested and used by other researchers.
  • +
  • RAPIDS can extract features in dynamic time segments (for example, every x minutes, x hours, x days, x weeks, x months, etc.). This is handy because you don’t have to deal with time zones, daylight saving changes, or date arithmetic.
  • +
  • Your analysis is less prone to errors. Every participant sensor dataset is analyzed in the same way and isolated from each other.
  • +
  • If you have lots of data, out-of-the-box parallel execution will speed up your analysis, and if your computer crashes, RAPIDS will start from where it left off.
  • +
  • You can publish your analysis code along with your papers and be sure it will run exactly as it does on your computer.
  • +
  • You can still add your own behavioral features and data streams if you need to, and the community will be able to reuse your work.
  • +
+
+

Data Streams

+
Can I process smartphone data collected with Beiwe, PurpleRobot, or app X?

Yes, but you need to add a new data stream to RAPIDS (a new format.yaml and container script in R or Python). Follow this tutorial. Open a new discussion in Github if you have any questions.

+

If you do so, let us know so we can integrate your work into RAPIDS.

+
+
Can I process data from Oura Rings, Actigraphs, or wearable X?

The only wearables we support at the moment are Empatica and Fitbit. However, get in touch if you need to process data from a different wearable. We have limited resources, so we add support for additional devices on an as-needed basis, but we would be happy to collaborate. Open a new discussion in Github if you have any questions.

+
+
Can I process smartphone or wearable data stored in PostgreSQL, Oracle, SQLite, CSV files, or data container X?

Yes, but you need to add a new data stream to RAPIDS (a new format.yaml and container script in R or Python). Follow this tutorial. If you are processing data streams we already support like AWARE, Fitbit, or Empatica and are just connecting to a different container, you can reuse their format.yaml and only implement a new container script. Open a new discussion in Github if you have any questions.

+

If you do so, let us know so we can integrate your work into RAPIDS.

+
+
I have participants that live in different time zones and some that travel; can RAPIDS handle this?

Yes, RAPIDS can handle single or multiple timezones per participant. You can use time zone data collected by smartphones or collected by hand.

+
+
Some of my participants used more than one device during my study; can RAPIDS handle this?

Yes, you can link more than one smartphone or wearable device to a single participant. RAPIDS will merge them and sort them automatically.

+
+
Some of my participants switched from Android to iOS or vice-versa during my study; can RAPIDS handle this?

Yes, data from multiple smartphones can be linked to a single participant. All iOS data is converted to Android data before merging it.

+
+

Extending RAPIDS

+
Can I add my own behavioral features/digital biomarkers?

Yes, you can implement your own features in R or Python following this tutorial.

+
+
Can I extract behavioral features based on two or more sensors?

Yes, we do this for PHONE_DATA_YIELD (combines all phone sensors), PHONE_LOCATIONS (combines location and data yield data), PHONE_APPLICATIONS_BACKGROUND (combines screen and app usage data), and FITBIT_INTRADAY_STEPS (combines Fitbit sleep and step data).

+

However, we haven’t come up with a user-friendly way to configure this, and currently, we join sensors on a case-by-case basis. This is mainly because not enough users have needed this functionality so far. Get in touch, and we can set it up together; the more use cases we are aware of, the easier it will be to integrate this into RAPIDS.

+
+
I know how to program in Python or R but not both. Can I still use or extend RAPIDS?

Yes, you don’t need to write any code to use RAPIDS out of the box. If you need to add support for new data streams or behavioral features, you can use scripts in either language.

+
+
I have scripts that clean raw data from X sensor, can I use them with RAPIDS?

Yes, you can add them as a [MUTATION][SCRIPT] in the format.yaml of the data stream you are using. You will add a main function that will receive a data frame with the raw data for that sensor that, in turn, will be used to compute behavioral features.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/sitemap.xml b/1.3/sitemap.xml new file mode 100644 index 00000000..29c99370 --- /dev/null +++ b/1.3/sitemap.xml @@ -0,0 +1,287 @@ + + + https://www.rapids.science/1.3/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/setup/overview/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/workflow-examples/minimal/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/citation/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/contributing/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/setup/installation/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/setup/configuration/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/setup/execution/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/data-streams-introduction/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/aware-mysql/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/aware-csv/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/aware-influxdb/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/mandatory-phone-format/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/fitbitjson-mysql/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/fitbitjson-csv/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/fitbitparsed-mysql/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/fitbitparsed-csv/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/mandatory-fitbit-format/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/empatica-zip/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/mandatory-empatica-format/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/datastreams/add-new-data-streams/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/feature-introduction/ + 2021-06-01 + daily + 
+ https://www.rapids.science/1.3/features/phone-accelerometer/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-activity-recognition/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-applications-crashes/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-applications-foreground/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-applications-notifications/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-battery/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-bluetooth/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-calls/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-conversation/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-data-yield/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-keyboard/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-light/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-locations/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-log/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-messages/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-screen/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-wifi-connected/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/phone-wifi-visible/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/fitbit-calories-intraday/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/fitbit-data-yield/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/fitbit-heartrate-summary/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/fitbit-heartrate-intraday/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/fitbit-sleep-summary/ + 2021-06-01 + daily + + 
https://www.rapids.science/1.3/features/fitbit-sleep-intraday/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/fitbit-steps-summary/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/fitbit-steps-intraday/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/empatica-accelerometer/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/empatica-heartrate/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/empatica-temperature/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/empatica-electrodermal-activity/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/empatica-blood-volume-pulse/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/empatica-inter-beat-interval/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/empatica-tags/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/features/add-new-features/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/visualizations/data-quality-visualizations/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/visualizations/feature-visualizations/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/workflow-examples/analysis/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/developers/git-flow/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/developers/remote-support/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/developers/virtual-environments/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/developers/documentation/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/developers/testing/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/developers/test-cases/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/developers/validation-schema-config/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/migrating-from-old-versions/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/code_of_conduct/ + 2021-06-01 + daily + + 
https://www.rapids.science/1.3/common-errors/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/team/ + 2021-06-01 + daily + + https://www.rapids.science/1.3/change-log/ + 2021-06-01 + daily + + \ No newline at end of file diff --git a/1.3/sitemap.xml.gz b/1.3/sitemap.xml.gz new file mode 100644 index 00000000..a6dbbef0 Binary files /dev/null and b/1.3/sitemap.xml.gz differ diff --git a/1.3/snippets/aware_format/index.html b/1.3/snippets/aware_format/index.html new file mode 100644 index 00000000..e4354e55 --- /dev/null +++ b/1.3/snippets/aware_format/index.html @@ -0,0 +1,2974 @@ + + + + + + + + + + + + + + + + + + + + + + Aware format - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ + + + + + + + +

Aware format

+ +

If you collected sensor data with the vanilla (original) AWARE mobile clients, you shouldn’t need to modify this format (described below).

+

Remember that a format maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs.

+

The yaml file that describes the format of this data stream is at: +

src/data/streams/aware_csv/format.yaml
+

+

For some sensors, we need to transform iOS data into Android format; you can refer to OS complex mapping to learn how this works.

+
+

Hint

+

The mappings in this stream (RAPIDS/Stream) have the same names because AWARE data was the first stream RAPIDS supported, meaning that RAPIDS considers AWARE column names the default.

+
+
PHONE_ACCELEROMETER
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_VALUES_0double_values_0
DOUBLE_VALUES_1double_values_1
DOUBLE_VALUES_2double_values_2
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_ACTIVITY_RECOGNITION
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ACTIVITY_NAMEactivity_name
ACTIVITY_TYPEactivity_type
CONFIDENCEconfidence
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
ACTIVITY_NAMEFLAG_TO_MUTATE
ACTIVITY_TYPEFLAG_TO_MUTATE
CONFIDENCEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + + + + + +
Script columnStream column
ACTIVITIESactivities
CONFIDENCEconfidence
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/activity_recogniton_ios_unification.R
+
+
+

Note

+

For RAPIDS columns of ACTIVITY_NAME and ACTIVITY_TYPE:

+
    +
  • if stream’s activities field is automotive, set ACTIVITY_NAME = in_vehicle and ACTIVITY_TYPE = 0
  • +
  • if stream’s activities field is cycling, set ACTIVITY_NAME = on_bicycle and ACTIVITY_TYPE = 1
  • +
  • if stream’s activities field is walking, set ACTIVITY_NAME = walking and ACTIVITY_TYPE = 7
  • +
  • if stream’s activities field is running, set ACTIVITY_NAME = running and ACTIVITY_TYPE = 8
  • +
  • if stream’s activities field is stationary, set ACTIVITY_NAME = still and ACTIVITY_TYPE = 3
  • +
  • if stream’s activities field is unknown, set ACTIVITY_NAME = unknown and ACTIVITY_TYPE = 4
  • +
+

For RAPIDS CONFIDENCE column:

+
    +
  • if stream’s confidence field is 0, set CONFIDENCE = 0
  • +
  • if stream’s confidence field is 1, set CONFIDENCE = 50
  • +
  • if stream’s confidence field is 2, set CONFIDENCE = 100
  • +
+
+
+
+
+
PHONE_APPLICATIONS_CRASHES
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
APPLICATION_VERSIONapplication_version
ERROR_SHORTerror_short
ERROR_LONGerror_long
ERROR_CONDITIONerror_condition
IS_SYSTEM_APPis_system_app
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_APPLICATIONS_FOREGROUND
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
IS_SYSTEM_APPis_system_app
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_APPLICATIONS_NOTIFICATIONS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
APPLICATION_NAMEapplication_name
TEXTtext
SOUNDsound
VIBRATEvibrate
DEFAULTSdefaults
FLAGSflags
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_BATTERY
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BATTERY_STATUSbattery_status
BATTERY_LEVELbattery_level
BATTERY_SCALEbattery_scale
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BATTERY_STATUSFLAG_TO_MUTATE
BATTERY_LEVELbattery_level
BATTERY_SCALEbattery_scale
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
BATTERY_STATUSbattery_status
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/battery_ios_unification.R
+
+
+

Note

+

For RAPIDS BATTERY_STATUS column:

+
    +
  • if stream’s battery_status field is 3, set BATTERY_STATUS = 5 (full status)
  • +
  • if stream’s battery_status field is 1, set BATTERY_STATUS = 3 (discharge)
  • +
+
+
+
+

Same as ANDROID

+
+
+
+
PHONE_BLUETOOTH
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
BT_ADDRESSbt_address
BT_NAMEbt_name
BT_RSSIbt_rssi
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Only old iOS versions supported this sensor (same mapping as Android).

+
+
+
+
PHONE_CALLS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
CALL_TYPEcall_type
CALL_DURATIONcall_duration
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
CALL_TYPEFLAG_TO_MUTATE
CALL_DURATIONcall_duration
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
CALL_TYPEcall_type
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/calls_ios_unification.R
+
+
+

Note

+

We transform iOS call logs into Android’s format. iOS stores call status: 1=incoming, 2=connected, 3=dialing, 4=disconnected, as opposed to Android’s events: 1=incoming, 2=outgoing, 3=missed.

+

We follow this algorithm to convert iOS call data (there are some inaccuracies in the way we handle sequences, see new rules below):

+
    +
  • Search for the disconnected (4) status as it is common to all calls
  • +
  • Group all events that preceded every status 4
  • +
  • We convert every 1,2,4 (or 2,1,4) sequence to an incoming call
  • +
  • We convert every 3,2,4 (or 2,3,4) sequence to an outgoing call
  • +
  • We convert every 1,4 or 3,4 sequence to a missed call (either incoming or outgoing)
  • +
  • We set the duration of the call to be the sum of every status (dialing/ringing to hangup) as opposed to the duration of the last status (pick up to hang up)
  • +
+

Tested with an Android (OnePlus 7T) and an iPhone XR

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Call typeAndroid (duration)iOS (duration)New Rule
Outgoing missed ended by me2 (0)3,4 (0,X)3,4 is converted to 2 with duration 0
Outgoing missed ended by them2(0)3,2,4 (0,X,X2)3,2,4 is converted to 2 with duration X2*
Incoming missed ended by meNA**1,4 (0,X)1,4 is converted to 3 with duration 0
Incoming missed ended by them3(0)1,4 (0,X)1,4 is converted to 3 with duration 0
Outgoing answered2(X excluding dialing time)3,2,4 (0,X,X2)3,2,4 is converted to 2 with duration X2
Incoming answered1(X excluding dialing time)1,2,4 (0,X,X2)1,2,4 is converted to 1 with duration X2
+

.* There is no way to differentiate an outgoing missed call ended by them from an outgoing answered call because the phone goes directly to voice mail and it counts as call time (essentially the voice mail answered).

+

.** Android does not record incoming missed calls ended by the participant, just those ended by the person calling or ignored by the participant.

+
+
+
+
+
PHONE_CONVERSATION
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_ENERGYdouble_energy
INFERENCEinference
DOUBLE_CONVO_STARTdouble_convo_start
DOUBLE_CONVO_ENDdouble_convo_end
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_ENERGYdouble_energy
INFERENCEinference
DOUBLE_CONVO_STARTFLAG_TO_MUTATE
DOUBLE_CONVO_ENDFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + + + + + +
Script columnStream column
DOUBLE_CONVO_STARTdouble_convo_start
DOUBLE_CONVO_ENDdouble_convo_end
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/conversation_ios_timestamp.R
+
+
+

Note

+

For RAPIDS columns of DOUBLE_CONVO_START and DOUBLE_CONVO_END:

+
    +
  • if stream’s double_convo_start field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_START = 1000 * double_convo_start.
  • +
  • if stream’s double_convo_end field is smaller than 9999999999, it is in seconds instead of milliseconds. Set DOUBLE_CONVO_END = 1000 * double_convo_end.
  • +
+
+
+
+
+
PHONE_KEYBOARD
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
PACKAGE_NAMEpackage_name
BEFORE_TEXTbefore_text
CURRENT_TEXTcurrent_text
IS_PASSWORDis_password
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_LIGHT
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_LIGHT_LUXdouble_light_lux
ACCURACYaccuracy
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_LOCATIONS
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
DOUBLE_LATITUDEdouble_latitude
DOUBLE_LONGITUDEdouble_longitude
DOUBLE_BEARINGdouble_bearing
DOUBLE_SPEEDdouble_speed
DOUBLE_ALTITUDEdouble_altitude
PROVIDERprovider
ACCURACYaccuracy
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_LOG
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
LOG_MESSAGElog_message
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_MESSAGES
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
MESSAGE_TYPEmessage_type
TRACEtrace
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

This sensor is not supported by iOS devices.

+
+
+
+
PHONE_SCREEN
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SCREEN_STATUSscreen_status
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SCREEN_STATUSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • COLUMN_MAPPINGS
  • +
+ + + + + + + + + + + + + +
Script columnStream column
SCREEN_STATUSscreen_status
+
    +
  • SCRIPTS
  • +
+
src/data/streams/mutations/phone/aware/screen_ios_unification.R
+
+
+

Note

+

For SCREEN_STATUS RAPIDS column:

+
    +
  • if stream’s screen_status field is 2 (lock episode), set SCREEN_STATUS = 0 (off episode).
  • +
+
+
+
+
+
PHONE_WIFI_CONNECTED
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
MAC_ADDRESSmac_address
SSIDssid
BSSIDbssid
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Same as ANDROID

+
+
+
+
PHONE_WIFI_VISIBLE
+

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPtimestamp
DEVICE_IDdevice_id
SSIDssid
BSSIDbssid
SECURITYsecurity
FREQUENCYfrequency
RSSIrssi
+

MUTATION

+
    +
  • COLUMN_MAPPINGS (None)
  • +
  • SCRIPTS (None)
  • +
+
+
+

Only old iOS versions supported this sensor (same mapping as Android).

+
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/snippets/database/index.html b/1.3/snippets/database/index.html new file mode 100644 index 00000000..5e268d1b --- /dev/null +++ b/1.3/snippets/database/index.html @@ -0,0 +1,1879 @@ + + + + + + + + + + + + + + + + + + + + + + Database - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ + + + + + + + +

Database

+ +
Setting up a DATABASE_GROUP and its connection credentials.
    +
  1. +

    If you haven’t done so, create an empty file called credentials.yaml in your RAPIDS root directory:

    +
  2. +
  3. +

    Add the following lines to credentials.yaml and replace your database-specific credentials (user, password, host, and database):

    +
    MY_GROUP:
    +  database: MY_DATABASE
    +  host: MY_HOST
    +  password: MY_PASSWORD
    +  port: 3306
    +  user: MY_USER
    +
    +
  4. +
  5. +

    Notes

    +
      +
    1. +

      The label [MY_GROUP] is arbitrary but it has to match the [DATABASE_GROUP] attribute of the data stream you choose to use.

      +
    2. +
    3. +

      Indentation matters

      +
    4. +
    5. +

      You can have more than one credentials group in credentials.yaml

      +
    6. +
    +
  6. +
+
Upgrading from ./.env from RAPIDS 0.x

In RAPIDS versions 0.x, database credentials were stored in a ./.env file. If you are migrating from that type of file, you have two options:

+
    +
  1. +

    Migrate your credentials by hand:

    +
    +
    [MY_GROUP]
    +user=MY_USER
    +password=MY_PASSWORD
    +host=MY_HOST
    +port=3306
    +database=MY_DATABASE
    +
    +
    +
    +
    MY_GROUP:
    +  user: MY_USER
    +  password: MY_PASSWORD
    +  host: MY_HOST
    +  port: 3306
    +  database: MY_DATABASE
    +
    +
    +
    +
  2. +
  3. +

    Use the migration script we provide (make sure your conda environment is active):

    +
    python tools/update_format_env.py
    +
    +
  4. +
+
+
Connecting to localhost (host machine) from inside our docker container.

If you are using RAPIDS’ docker container and Docker-for-mac or Docker-for-Windows 18.03+, you can connect to a MySQL database in your host machine using host.docker.internal instead of 127.0.0.1 or localhost. In a Linux host, you need to run our docker container using docker run --network="host" -d moshiresearch/rapids:latest and then 127.0.0.1 will point to your host machine.

+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/snippets/feature_introduction_example/index.html b/1.3/snippets/feature_introduction_example/index.html new file mode 100644 index 00000000..0f713775 --- /dev/null +++ b/1.3/snippets/feature_introduction_example/index.html @@ -0,0 +1,1852 @@ + + + + + + + + + + + + + + + + + + + + + + Feature introduction example - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ + + + + + + + +

Feature introduction example

+ +
    +
  1. +

    Sensor section

    +

    Each sensor (accelerometer, screen, etc.) of every supported device (smartphone, Fitbit, etc.) has a section in the config.yaml with parameters and feature PROVIDERS.

    +
  2. +
  3. +

    Sensor Parameters.

    +

    Each sensor section has one or more parameters. These are parameters that affect different aspects of how the raw data is pulled, and processed.

    +

    The CONTAINER parameter exists for every sensor, but some sensors will have extra parameters like [PHONE_LOCATIONS].

    +

    We explain these parameters in a table at the top of each sensor documentation page.

    +
  4. +
  5. +

    Sensor Providers

    +

    Each object in this list represents a feature PROVIDER. Each sensor can have zero, one, or more providers.

    +

    A PROVIDER is a script that creates behavioral features for a specific sensor. Providers are created by the core RAPIDS team or by the community, and each is named after its first author, like [PHONE_LOCATIONS][DORYAB].

    +

    In this example, there are two accelerometer feature providers RAPIDS and PANDA.

    +
  6. +
  7. +

    PROVIDER Parameters

    +

    Each PROVIDER has parameters that affect the computation of the behavioral features it offers.

    +

    These parameters include at least a [COMPUTE] flag that you switch to True to extract a provider’s behavioral features.

    +

    We explain every provider’s parameter in a table under the Parameters description heading on each provider documentation page.

    +
  8. +
  9. +

    PROVIDER Features

    +

    Each PROVIDER offers a set of behavioral features.

    +

    These features are grouped in an array for some providers, like those for RAPIDS provider. For others, they are grouped in a collection of arrays, like those for PANDAS provider.

    +

    In either case, you can delete the features you are not interested in, and they will not be included in the sensor’s output feature file.

    +

    We explain each behavioral feature in a table under the Features description heading on each provider documentation page.

    +
  10. +
  11. +

    PROVIDER script

    +

    Each PROVIDER has a SRC_SCRIPT that points to the script implementing its behavioral features.

    +

    It has to be a relative path from RAPIDS’ root folder and the script’s parent folder should be named after the provider, e.g. panda.

    +
  12. +
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/snippets/jsonfitbit_format/index.html b/1.3/snippets/jsonfitbit_format/index.html new file mode 100644 index 00000000..750824f8 --- /dev/null +++ b/1.3/snippets/jsonfitbit_format/index.html @@ -0,0 +1,2373 @@ + + + + + + + + + + + + + + + + + + + + + + Jsonfitbit format - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ + + + + + + + +

Jsonfitbit format

+ +

The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors. This file is at:

+
src/data/streams/fitbitjson_csv/format.yaml
+
+

If you want RAPIDS to process Fitbit sensor data using this stream, you will need to map DEVICE_ID and JSON_FITBIT_COLUMN to your own raw data columns inside each sensor section in format.yaml.

+
FITBIT_HEARTRATE_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
HEARTRATE_DAILY_RESTINGHRFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESOUTOFRANGEFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESFATBURNFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESCARDIOFLAG_TO_MUTATE
HEARTRATE_DAILY_CALORIESPEAKFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_heartrate_summary_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-07”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1200.6102,”max”:88,”min”:31,”minutes”:1058,”name”:”Out of Range”},{“caloriesOut”:760.3020,”max”:120,”min”:86,”minutes”:366,”name”:”Fat Burn”},{“caloriesOut”:15.2048,”max”:146,”min”:120,”minutes”:2,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:72}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:68},{“time”:”00:01:00”,”value”:67},{“time”:”00:02:00”,”value”:67},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-08”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1100.1120,”max”:89,”min”:30,”minutes”:921,”name”:”Out of Range”},{“caloriesOut”:660.0012,”max”:118,”min”:82,”minutes”:361,”name”:”Fat Burn”},{“caloriesOut”:23.7088,”max”:142,”min”:108,”minutes”:3,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:70}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:77},{“time”:”00:01:00”,”value”:75},{“time”:”00:02:00”,”value”:73},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-09”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:750.3615,”max”:77,”min”:30,”minutes”:851,”name”:”Out of Range”},{“caloriesOut”:734.1516,”max”:107,”min”:77,”minutes”:550,”name”:”Fat Burn”},{“caloriesOut”:131.8579,”max”:130,”min”:107,”minutes”:29,”name”:”Cardio”},{“caloriesOut”:0,”max”:220,”min”:130,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:69}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:90},{“time”:”00:01:00”,”value”:89},{“time”:”00:02:00”,”value”:88},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_HEARTRATE_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
HEARTRATEFLAG_TO_MUTATE
HEARTRATE_ZONEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_heartrate_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-07”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1200.6102,”max”:88,”min”:31,”minutes”:1058,”name”:”Out of Range”},{“caloriesOut”:760.3020,”max”:120,”min”:86,”minutes”:366,”name”:”Fat Burn”},{“caloriesOut”:15.2048,”max”:146,”min”:120,”minutes”:2,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:72}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:68},{“time”:”00:01:00”,”value”:67},{“time”:”00:02:00”,”value”:67},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-08”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:1100.1120,”max”:89,”min”:30,”minutes”:921,”name”:”Out of Range”},{“caloriesOut”:660.0012,”max”:118,”min”:82,”minutes”:361,”name”:”Fat Burn”},{“caloriesOut”:23.7088,”max”:142,”min”:108,”minutes”:3,”name”:”Cardio”},{“caloriesOut”:0,”max”:221,”min”:148,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:70}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:77},{“time”:”00:01:00”,”value”:75},{“time”:”00:02:00”,”value”:73},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-heart”:[{“dateTime”:”2020-10-09”,”value”:{“customHeartRateZones”:[],”heartRateZones”:[{“caloriesOut”:750.3615,”max”:77,”min”:30,”minutes”:851,”name”:”Out of Range”},{“caloriesOut”:734.1516,”max”:107,”min”:77,”minutes”:550,”name”:”Fat Burn”},{“caloriesOut”:131.8579,”max”:130,”min”:107,”minutes”:29,”name”:”Cardio”},{“caloriesOut”:0,”max”:220,”min”:130,”minutes”:0,”name”:”Peak”}],”restingHeartRate”:69}}],”activities-heart-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:90},{“time”:”00:01:00”,”value”:89},{“time”:”00:02:00”,”value”:88},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_SLEEP_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
LOCAL_START_DATE_TIMEFLAG_TO_MUTATE
LOCAL_END_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
EFFICIENCYFLAG_TO_MUTATE
MINUTES_AFTER_WAKEUPFLAG_TO_MUTATE
MINUTES_ASLEEPFLAG_TO_MUTATE
MINUTES_AWAKEFLAG_TO_MUTATE
MINUTES_TO_FALL_ASLEEPFLAG_TO_MUTATE
MINUTES_IN_BEDFLAG_TO_MUTATE
IS_MAIN_SLEEPFLAG_TO_MUTATE
TYPEFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_sleep_summary_json.py
    +- src/data/streams/mutations/fitbit/add_local_date_time.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1’s count_awake, duration_awake, count_awakenings, count_restless, and duration_restless columns.

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-10”,”duration”:3600000,”efficiency”:92,”endTime”:”2020-10-10T16:37:00.000”,”infoCode”:2,”isMainSleep”:false,”levels”:{“data”:[{“dateTime”:”2020-10-10T15:36:30.000”,”level”:”restless”,”seconds”:60},{“dateTime”:”2020-10-10T15:37:30.000”,”level”:”asleep”,”seconds”:660},{“dateTime”:”2020-10-10T15:48:30.000”,”level”:”restless”,”seconds”:60},…], “summary”:{“asleep”:{“count”:0,”minutes”:56},”awake”:{“count”:0,”minutes”:0},”restless”:{“count”:3,”minutes”:4}}},”logId”:26315914306,”minutesAfterWakeup”:0,”minutesAsleep”:55,”minutesAwake”:5,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T15:36:30.000”,”timeInBed”:60,”type”:”classic”},{“dateOfSleep”:”2020-10-10”,”duration”:22980000,”efficiency”:88,”endTime”:”2020-10-10T08:10:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-10T01:46:30.000”,”level”:”light”,”seconds”:420},{“dateTime”:”2020-10-10T01:53:30.000”,”level”:”deep”,”seconds”:1230},{“dateTime”:”2020-10-10T02:14:00.000”,”level”:”light”,”seconds”:360},…], “summary”:{“deep”:{“count”:3,”minutes”:92,”thirtyDayAvgMinutes”:0},”light”:{“count”:29,”minutes”:193,”thirtyDayAvgMinutes”:0},”rem”:{“count”:4,”minutes”:33,”thirtyDayAvgMinutes”:0},”wake”:{“count”:28,”minutes”:65,”thirtyDayAvgMinutes”:0}}},”logId”:26311786557,”minutesAfterWakeup”:0,”minutesAsleep”:318,”minutesAwake”:65,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T01:46:30.000”,”timeInBed”:383,”type”:”stages”}],”summary”:{“stages”:{“deep”:92,”light”:193,”rem”:33,”wake”:65},”totalMinutesAsleep”:373,”totalSleepRecords”:2,”totalTimeInBed”:443}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-11”,”duration”:41640000,”efficiency”:89,”endTime”:”2020-10-11T11:47:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-11T00:12:30.000”,”level”:”wake”,”seconds”:450},{“dateTime”:”2020-10-11T00:20:00.000”,”level”:”light”,”seconds”:870},{“dateTime”:”2020-10-11T00:34:30.000”,”level”:”wake”,”seconds”:780},…], “summary”:{“deep”:{“count”:4,”minutes”:52,”thirtyDayAvgMinutes”:62},”light”:{“count”:32,”minutes”:442,”thirtyDayAvgMinutes”:364},”rem”:{“count”:6,”minutes”:68,”thirtyDayAvgMinutes”:58},”wake”:{“count”:29,”minutes”:132,”thirtyDayAvgMinutes”:94}}},”logId”:26589710670,”minutesAfterWakeup”:1,”minutesAsleep”:562,”minutesAwake”:132,”minutesToFallAsleep”:0,”startTime”:”2020-10-11T00:12:30.000”,”timeInBed”:694,”type”:”stages”}],”summary”:{“stages”:{“deep”:52,”light”:442,”rem”:68,”wake”:132},”totalMinutesAsleep”:562,”totalSleepRecords”:1,”totalTimeInBed”:694}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-12”,”duration”:28980000,”efficiency”:93,”endTime”:”2020-10-12T09:34:30.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-12T01:31:00.000”,”level”:”wake”,”seconds”:600},{“dateTime”:”2020-10-12T01:41:00.000”,”level”:”light”,”seconds”:60},{“dateTime”:”2020-10-12T01:42:00.000”,”level”:”deep”,”seconds”:2340},…], “summary”:{“deep”:{“count”:4,”minutes”:63,”thirtyDayAvgMinutes”:59},”light”:{“count”:27,”minutes”:257,”thirtyDayAvgMinutes”:364},”rem”:{“count”:5,”minutes”:94,”thirtyDayAvgMinutes”:58},”wake”:{“count”:24,”minutes”:69,”thirtyDayAvgMinutes”:95}}},”logId”:26589710673,”minutesAfterWakeup”:0,”minutesAsleep”:415,”minutesAwake”:68,”minutesToFallAsleep”:0,”startTime”:”2020-10-12T01:31:00.000”,”timeInBed”:483,”type”:”stages”}],”summary”:{“stages”:{“deep”:63,”light”:257,”rem”:94,”wake”:69},”totalMinutesAsleep”:415,”totalSleepRecords”:1,”totalTimeInBed”:483}}
    +
    +
    +
  • +
+
+
FITBIT_SLEEP_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
DEVICE_IDdevice_id
TYPE_EPISODE_IDFLAG_TO_MUTATE
DURATIONFLAG_TO_MUTATE
IS_MAIN_SLEEPFLAG_TO_MUTATE
TYPEFLAG_TO_MUTATE
LEVELFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_sleep_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    Fitbit API has two versions for sleep data, v1 and v1.2; we support both.

    +

    All columns except DEVICE_ID are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-10”,”duration”:3600000,”efficiency”:92,”endTime”:”2020-10-10T16:37:00.000”,”infoCode”:2,”isMainSleep”:false,”levels”:{“data”:[{“dateTime”:”2020-10-10T15:36:30.000”,”level”:”restless”,”seconds”:60},{“dateTime”:”2020-10-10T15:37:30.000”,”level”:”asleep”,”seconds”:660},{“dateTime”:”2020-10-10T15:48:30.000”,”level”:”restless”,”seconds”:60},…], “summary”:{“asleep”:{“count”:0,”minutes”:56},”awake”:{“count”:0,”minutes”:0},”restless”:{“count”:3,”minutes”:4}}},”logId”:26315914306,”minutesAfterWakeup”:0,”minutesAsleep”:55,”minutesAwake”:5,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T15:36:30.000”,”timeInBed”:60,”type”:”classic”},{“dateOfSleep”:”2020-10-10”,”duration”:22980000,”efficiency”:88,”endTime”:”2020-10-10T08:10:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-10T01:46:30.000”,”level”:”light”,”seconds”:420},{“dateTime”:”2020-10-10T01:53:30.000”,”level”:”deep”,”seconds”:1230},{“dateTime”:”2020-10-10T02:14:00.000”,”level”:”light”,”seconds”:360},…], “summary”:{“deep”:{“count”:3,”minutes”:92,”thirtyDayAvgMinutes”:0},”light”:{“count”:29,”minutes”:193,”thirtyDayAvgMinutes”:0},”rem”:{“count”:4,”minutes”:33,”thirtyDayAvgMinutes”:0},”wake”:{“count”:28,”minutes”:65,”thirtyDayAvgMinutes”:0}}},”logId”:26311786557,”minutesAfterWakeup”:0,”minutesAsleep”:318,”minutesAwake”:65,”minutesToFallAsleep”:0,”startTime”:”2020-10-10T01:46:30.000”,”timeInBed”:383,”type”:”stages”}],”summary”:{“stages”:{“deep”:92,”light”:193,”rem”:33,”wake”:65},”totalMinutesAsleep”:373,”totalSleepRecords”:2,”totalTimeInBed”:443}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-11”,”duration”:41640000,”efficiency”:89,”endTime”:”2020-10-11T11:47:00.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-11T00:12:30.000”,”level”:”wake”,”seconds”:450},{“dateTime”:”2020-10-11T00:20:00.000”,”level”:”light”,”seconds”:870},{“dateTime”:”2020-10-11T00:34:30.000”,”level”:”wake”,”seconds”:780},…], “summary”:{“deep”:{“count”:4,”minutes”:52,”thirtyDayAvgMinutes”:62},”light”:{“count”:32,”minutes”:442,”thirtyDayAvgMinutes”:364},”rem”:{“count”:6,”minutes”:68,”thirtyDayAvgMinutes”:58},”wake”:{“count”:29,”minutes”:132,”thirtyDayAvgMinutes”:94}}},”logId”:26589710670,”minutesAfterWakeup”:1,”minutesAsleep”:562,”minutesAwake”:132,”minutesToFallAsleep”:0,”startTime”:”2020-10-11T00:12:30.000”,”timeInBed”:694,”type”:”stages”}],”summary”:{“stages”:{“deep”:52,”light”:442,”rem”:68,”wake”:132},”totalMinutesAsleep”:562,”totalSleepRecords”:1,”totalTimeInBed”:694}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“sleep”:[{“dateOfSleep”:”2020-10-12”,”duration”:28980000,”efficiency”:93,”endTime”:”2020-10-12T09:34:30.000”,”infoCode”:0,”isMainSleep”:true,”levels”:{“data”:[{“dateTime”:”2020-10-12T01:31:00.000”,”level”:”wake”,”seconds”:600},{“dateTime”:”2020-10-12T01:41:00.000”,”level”:”light”,”seconds”:60},{“dateTime”:”2020-10-12T01:42:00.000”,”level”:”deep”,”seconds”:2340},…], “summary”:{“deep”:{“count”:4,”minutes”:63,”thirtyDayAvgMinutes”:59},”light”:{“count”:27,”minutes”:257,”thirtyDayAvgMinutes”:364},”rem”:{“count”:5,”minutes”:94,”thirtyDayAvgMinutes”:58},”wake”:{“count”:24,”minutes”:69,”thirtyDayAvgMinutes”:95}}},”logId”:26589710673,”minutesAfterWakeup”:0,”minutesAsleep”:415,”minutesAwake”:68,”minutesToFallAsleep”:0,”startTime”:”2020-10-12T01:31:00.000”,”timeInBed”:483,”type”:”stages”}],”summary”:{“stages”:{“deep”:63,”light”:257,”rem”:94,”wake”:69},”totalMinutesAsleep”:415,”totalSleepRecords”:1,”totalTimeInBed”:483}}
    +
    +
    +
  • +
+
+
FITBIT_STEPS_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMEFLAG_TO_MUTATE
STEPSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_steps_summary_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    TIMESTAMP, LOCAL_DATE_TIME, and STEPS are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-07”,”value”:”1775”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:5},{“time”:”00:01:00”,”value”:3},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-08”,”value”:”3201”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:14},{“time”:”00:01:00”,”value”:11},{“time”:”00:02:00”,”value”:10},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-09”,”value”:”998”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:0},{“time”:”00:01:00”,”value”:0},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+
FITBIT_STEPS_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMEFLAG_TO_MUTATE
STEPSFLAG_TO_MUTATE
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS

    + + + + + + + + + + + + + +
    Script columnStream column
    JSON_FITBIT_COLUMNfitbit_data
    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/parse_steps_intraday_json.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
    +

    Note

    +

    TIMESTAMP, LOCAL_DATE_TIME, and STEPS are parsed from JSON_FITBIT_COLUMN. JSON_FITBIT_COLUMN is a string column containing the JSON objects returned by Fitbit’s API. See an example of the raw data RAPIDS expects for this data stream:

    +
    Example of the expected raw data + + + + + + + + + + + + + + + + + + + + +
    device_idfitbit_data
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-07”,”value”:”1775”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:5},{“time”:”00:01:00”,”value”:3},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-08”,”value”:”3201”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:14},{“time”:”00:01:00”,”value”:11},{“time”:”00:02:00”,”value”:10},…],”datasetInterval”:1,”datasetType”:”minute”}}
    a748ee1a-1d0b-4ae9-9074-279a2b6ba524{“activities-steps”:[{“dateTime”:”2020-10-09”,”value”:”998”}],”activities-steps-intraday”:{“dataset”:[{“time”:”00:00:00”,”value”:0},{“time”:”00:01:00”,”value”:0},{“time”:”00:02:00”,”value”:0},…],”datasetInterval”:1,”datasetType”:”minute”}}
    +
    +
    +
  • +
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/snippets/parsedfitbit_format/index.html b/1.3/snippets/parsedfitbit_format/index.html new file mode 100644 index 00000000..5b75f307 --- /dev/null +++ b/1.3/snippets/parsedfitbit_format/index.html @@ -0,0 +1,2424 @@ + + + + + + + + + + + + + + + + + + + + + + Parsedfitbit format - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+ + + + + + + + +

Parsedfitbit format

+ +

The format.yaml maps and transforms columns in your raw data stream to the mandatory columns RAPIDS needs for Fitbit sensors. This file is at:

+
src/data/streams/fitbitparsed_mysql/format.yaml
+
+

If you want to use this stream with your data, modify every sensor in format.yaml to map all columns except TIMESTAMP in [RAPIDS_COLUMN_MAPPINGS] to your raw data column names.

+

All columns are mandatory; however, all except device_id and local_date_time can be empty if you don’t have that data. Just keep in mind that some features will be empty if some of these columns are empty.

+
FITBIT_HEARTRATE_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
HEARTRATE_DAILY_RESTINGHRheartrate_daily_restinghr
HEARTRATE_DAILY_CALORIESOUTOFRANGEheartrate_daily_caloriesoutofrange
HEARTRATE_DAILY_CALORIESFATBURNheartrate_daily_caloriesfatburn
HEARTRATE_DAILY_CALORIESCARDIOheartrate_daily_caloriescardio
HEARTRATE_DAILY_CALORIESPEAKheartrate_daily_caloriespeak
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timeheartrate_daily_restinghrheartrate_daily_caloriesoutofrangeheartrate_daily_caloriesfatburnheartrate_daily_caloriescardioheartrate_daily_caloriespeak
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07721200.6102760.302015.20480
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-08701100.1120660.001223.70880
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-0969750.3615734.1516131.85790
+
+
+
+
FITBIT_HEARTRATE_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
HEARTRATEheartrate
HEARTRATE_ZONEheartrate_zone
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the raw data RAPIDS expects for this data stream + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timeheartrateheartrate_zone
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:00:0068outofrange
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:01:0067outofrange
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:02:0067outofrange
+
+
+
+
FITBIT_SLEEP_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMEFLAG_TO_MUTATE
LOCAL_START_DATE_TIMElocal_start_date_time
LOCAL_END_DATE_TIMElocal_end_date_time
DEVICE_IDdevice_id
EFFICIENCYefficiency
MINUTES_AFTER_WAKEUPminutes_after_wakeup
MINUTES_ASLEEPminutes_asleep
MINUTES_AWAKEminutes_awake
MINUTES_TO_FALL_ASLEEPminutes_to_fall_asleep
MINUTES_IN_BEDminutes_in_bed
IS_MAIN_SLEEPis_main_sleep
TYPEtype
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    - src/data/streams/mutations/fitbit/add_local_date_time.py
    +- src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+

Fitbit API has two versions for sleep data, v1 and v1.2. We support both but ignore v1’s count_awake, duration_awake, count_awakenings, count_restless, and duration_restless columns.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_start_date_timelocal_end_date_timeefficiencyminutes_after_wakeupminutes_asleepminutes_awakeminutes_to_fall_asleepminutes_in_bedis_main_sleeptype
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-10 15:36:302020-10-10 16:37:009205550600classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-10 01:46:302020-10-10 08:10:008803186503831stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-11 00:12:302020-10-11 11:47:0089156213206941stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-12 01:31:002020-10-12 09:34:309304156804831stages
+
+
+
+
FITBIT_SLEEP_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
LOCAL_DATE_TIMElocal_date_time
DEVICE_IDdevice_id
TYPE_EPISODE_IDtype_episode_id
DURATIONduration
IS_MAIN_SLEEPis_main_sleep
TYPEtype
LEVELlevel
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+

Fitbit API has two versions for sleep data, v1 and v1.2; we support both.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
device_idtype_episode_idlocal_date_timedurationlevelis_main_sleeptype
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:36:3060restless0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:37:30660asleep0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba52402020-10-10 15:48:3060restless0classic
a748ee1a-1d0b-4ae9-9074-279a2b6ba524
a748ee1a-1d0b-4ae9-9074-279a2b6ba52412020-10-10 01:46:30420light1stages
a748ee1a-1d0b-4ae9-9074-279a2b6ba52412020-10-10 01:53:301230deep1stages
+
+
+
+
FITBIT_STEPS_SUMMARY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMElocal_date_time
STEPSsteps
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timesteps
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-071775
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-083201
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-09998
+
+
+
+
FITBIT_STEPS_INTRADAY

RAPIDS_COLUMN_MAPPINGS

+ + + + + + + + + + + + + + + + + + + + + + + + + +
RAPIDS columnStream column
TIMESTAMPFLAG_TO_MUTATE
DEVICE_IDdevice_id
LOCAL_DATE_TIMElocal_date_time
STEPSsteps
+

MUTATION

+
    +
  • +

    COLUMN_MAPPINGS (None)

    +
  • +
  • +

    SCRIPTS

    +
    src/data/streams/mutations/fitbit/add_zero_timestamp.py
    +
    +
  • +
+
+

Note

+

add_zero_timestamp adds an all-zero column called timestamp that will be filled in later in the pipeline by readable_time.R converting LOCAL_DATE_TIME to a unix timestamp taking into account single or multiple time zones.

+
Example of the expected raw data + + + + + + + + + + + + + + + + + + + + + + + + +
device_idlocal_date_timesteps
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:00:005
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:01:003
a748ee1a-1d0b-4ae9-9074-279a2b6ba5242020-10-07 00:02:000
+
+
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/stylesheets/extra.css b/1.3/stylesheets/extra.css new file mode 100644 index 00000000..5638aeaf --- /dev/null +++ b/1.3/stylesheets/extra.css @@ -0,0 +1,51 @@ +@media screen and (min-width: 76.25em) { + .md-nav__item--section { + display: block; + margin: 1.75em 0; + } + + .md-nav :not(.md-nav--primary) > .md-nav__list { + padding-left: 7px; + } +} +.md-nav__item .md-nav__link--active { + color: var(--md-typeset-a-color); + background-color: var(--md-code-bg-color); +} + +div[data-md-component=announce] { + background-color: rgba(255,145,0,.1); +} +div[data-md-component=announce]>div#announce-msg{ + color: var(--md-admonition-fg-color); + font-size: .8rem; + text-align: center; + margin: 15px; +} +div[data-md-component=announce]>div#announce-msg>a{ + color: var(--md-typeset-a-color); + text-decoration: underline; +} + +.md-typeset table:not([class]) th { + min-width: 0rem; +} + +/* Users and contributors grid */ +.users { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(150px, 1fr)); + grid-template-rows: auto; + grid-gap: 15px; +} + +.users > div { + display: flex; + justify-content: center; + align-items: center; +} + +.users > div > img { + max-height: 100px; + object-fit: contain; +} \ No newline at end of file diff --git a/1.3/team/index.html b/1.3/team/index.html new file mode 100644 index 00000000..7b012c11 --- /dev/null +++ b/1.3/team/index.html @@ -0,0 +1,2282 @@ + + + + + + + + + + + + + + + + + + + + + + Team - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

RAPIDS Team

+

If you are interested in contributing feel free to submit a pull request or contact us.

+

Core Team

+

Julio Vega (Designer and Lead Developer)

+
About

Julio Vega is a postdoctoral associate at the Mobile Sensing + Health Institute. He is interested in personalized methodologies to monitor chronic conditions that affect daily human behavior using mobile and wearable data.

+ +
+

Meng Li

+
About

Meng Li received her Master of Science degree in Information Science from the University of Pittsburgh. She is interested in applying machine learning algorithms to the medical field.

+ +
+

Abhineeth Reddy Kunta

+
About

Abhineeth Reddy Kunta is a Senior Software Engineer with the Mobile Sensing + Health Institute. He is experienced in software development and specializes in building solutions using machine learning. Abhineeth likes exploring ways to leverage technology in advancing medicine and education. Previously he worked as a Computer Programmer at Georgia Department of Public Health. He has a master’s degree in Computer Science from George Mason University.

+
+

Kwesi Aguillera

+
About

Kwesi Aguillera is currently in his first year at the University of Pittsburgh pursuing a Master of Science in Information Science specializing in Big Data Analytics. He received his Bachelor of Science degree in Computer Science and Management from the University of the West Indies. Kwesi considers himself a full stack developer and looks forward to applying this knowledge to big data analysis.

+ +
+

Echhit Joshi

+
About

Echhit Joshi is a Masters student at the School of Computing and Information at University of Pittsburgh. His areas of interest are Machine/Deep Learning, Data Mining, and Analytics.

+ +
+

Nicolas Leo

+
About

Nicolas is a rising senior studying computer science at the University of Pittsburgh. His academic interests include databases, machine learning, and application development. After completing his undergraduate degree, he plans to attend graduate school for a MS in Computer Science with a focus on Intelligent Systems.

+
+

Nikunj Goel

+
About

Nik is a graduate student at the University of Pittsburgh pursuing a Master of Science in Information Science. He earned his Bachelor of Technology degree in Information Technology from India. He is a data enthusiast passionate about finding meaning in raw data. In the long term, his goal is to create a breakthrough in Data Science and Deep Learning.

+ +
+

Community Contributors

+

Agam Kumar

+
About

Agam is a junior at Carnegie Mellon University studying Statistics and Machine Learning and pursuing an additional major in Computer Science. He is a member of the Data Science team in the Health and Human Performance Lab at CMU and has keen interests in software development and data science. His research interests include ML applications in medicine.

+ +
+

Yasaman S. Sefidgar

+
About +
+

Joe Kim

+
About +
+

Brinnae Bent

+
About +
+

Stephen Price

+
About

Carnegie Mellon University

+
+

Neil Singh

+
About

University of Virginia

+
+

Advisors

+

Afsaneh Doryab

+
About +
+

Carissa Low

+
About +
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/visualizations/data-quality-visualizations/index.html b/1.3/visualizations/data-quality-visualizations/index.html new file mode 100644 index 00000000..ae074538 --- /dev/null +++ b/1.3/visualizations/data-quality-visualizations/index.html @@ -0,0 +1,2036 @@ + + + + + + + + + + + + + + + + + + + + + + Data Quality - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + +
+
+ + + + + + + + +

Data Quality Visualizations

+

We showcase these visualizations with a test study that collected 14 days of smartphone and Fitbit data from two participants (example01 and example02) and extracted behavioral features within five time segments (daily, morning, afternoon, evening, and night).

+
+

Note

+

Time segments (e.g. daily, morning, etc.) can have multiple instances (day 1, day 2, or morning 1, morning 2, etc.)

+
+

1. Histograms of phone data yield

+

RAPIDS provides two histograms that show the number of time segment instances that had a certain ratio of valid yielded minutes and hours, respectively. A valid yielded minute has at least 1 row of data from any smartphone sensor and a valid yielded hour contains at least M valid minutes.

+

These plots can be used as a rough indication of the smartphone monitoring coverage during a study aggregated across all participants. For example, the figure below shows a valid yielded minutes histogram for daily segments and we can infer that the monitoring coverage was very good since almost all segments contain at least 90 to 100% of the expected sensed minutes.

+
+

Example

+

Click here to see an example of these interactive visualizations in HTML format

+
+
+ +
Histogram of the data yielded minute ratio for a single participant during five time segments (daily, morning, afternoon, evening, and night)
+
+ +

2. Heatmaps of overall data yield

+

These heatmaps are a breakdown per time segment and per participant of Visualization 1. Heatmap’s rows represent participants, columns represent time segment instances and the cells’ color represent the valid yielded minute or hour ratio for a participant during a time segment instance.

+

As different participants might join a study on different dates and time segments can be of any length and start on any day, the x-axis can be labelled with the absolute time of the start of each time segment instance or the time delta between the start of each time segment instance minus the start of the first instance. These plots provide a quick study overview of the monitoring coverage per person and per time segment.

+

The figure below shows the heatmap of the valid yielded minute ratio for participants example01 and example02 on daily segments and, as we inferred from the previous histogram, the lighter (yellow) color on most time segment instances (cells) indicate both phones sensed data without interruptions for most days (except for the first and last ones).

+
+
+

Example

+

Click here to see an example of these interactive visualizations in HTML format

+
+

+ +
Overall compliance heatmap for all participants
+

+
+
+
+

Example

+

Click here to see an example of these interactive visualizations in HTML format

+
+

+ +
Overall compliance heatmap for all participants
+

+
+
+

3. Heatmap of recorded phone sensors

+

In these heatmaps rows represent time segment instances, columns represent minutes since the start of a time segment instance, and cells’ color shows the number of phone sensors that logged at least one row of data during those 1-minute windows.

+

RAPIDS creates a plot per participant and per time segment and can be used as a rough indication of whether time-based sensors were following their sensing schedule (e.g. if location was being sensed every 2 minutes).

+

The figure below shows this heatmap for phone sensors collected by participant example01 in daily time segments from Apr 23rd 2020 to May 4th 2020. We can infer that for most of the monitoring time, the participant’s phone logged data from at least 7 sensors each minute.

+
+

Example

+

Click here to see an example of these interactive visualizations in HTML format

+
+
+ +
Heatmap of the recorded phone sensors per minute and per time segment of a single participant
+
+ +

4. Heatmap of sensor row count

+

These heatmaps are a per-sensor breakdown of Visualization 1 and Visualization 2. Note that the second row (ratio of valid yielded minutes) of this heatmap matches the respective participant (bottom) row of the screenshot in Visualization 2.

+

In these heatmaps rows represent phone or Fitbit sensors, columns represent time segment instances and cell’s color shows the normalized (0 to 1) row count of each sensor within a time segment instance. RAPIDS creates one heatmap per participant and they can be used to judge missing data on a per participant and per sensor basis.

+

The figure below shows data for 14 phone sensors (including data yield) of example01’s daily segments. From the top two rows, we can see that the phone was sensing data for most of the monitoring period (as suggested by Figure 3 and Figure 4). We can also infer how phone usage influenced the different sensor streams; there are peaks of screen events during the first day (Apr 23rd), peaks of location coordinates on Apr 26th and Apr 30th, and no sent or received SMS except for Apr 23rd, Apr 29th and Apr 30th (unlabeled row between screen and locations).

+
+

Example

+

Click here to see an example of these interactive visualizations in HTML format

+
+
+ +
Heatmap of the sensor row count per time segment of a single participant
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/visualizations/feature-visualizations/index.html b/1.3/visualizations/feature-visualizations/index.html new file mode 100644 index 00000000..a9eb3a42 --- /dev/null +++ b/1.3/visualizations/feature-visualizations/index.html @@ -0,0 +1,1938 @@ + + + + + + + + + + + + + + + + + + + + + + Features - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Feature Visualizations

+

1. Heatmap Correlation Matrix

+

Columns and rows are the behavioral features computed in RAPIDS, cells’ color represents the correlation coefficient between all days of data for every pair of features of all participants.

+

The user can specify a minimum number of observations (time segment instances) required to compute the correlation between two features using the MIN_ROWS_RATIO parameter (0.5 by default) and the correlation method (Pearson, Spearman or Kendall) with the CORR_METHOD parameter. In addition, this plot can be configured to only display correlation coefficients above a threshold using the CORR_THRESHOLD parameter (0.1 by default).

+
+

Example

+

Click here to see an example of these interactive visualizations in HTML format

+
+
+ +
Correlation matrix heatmap for all the features of all participants
+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/workflow-examples/analysis/index.html b/1.3/workflow-examples/analysis/index.html new file mode 100644 index 00000000..704ad36e --- /dev/null +++ b/1.3/workflow-examples/analysis/index.html @@ -0,0 +1,2052 @@ + + + + + + + + + + + + + + + + + + + + + + Complete Example - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + +
+
+ + + + + + + + +

Analysis Workflow Example

+
+

TL;DR

+
    +
  • In addition to using RAPIDS to extract behavioral features and create plots, you can structure your data analysis within RAPIDS (i.e. cleaning your features and creating ML/statistical models)
  • +
  • We include an analysis example in RAPIDS that covers raw data processing, cleaning, feature extraction, machine learning modeling, and evaluation
  • +
  • Use this example as a guide to structure your own analysis within RAPIDS
  • +
  • RAPIDS analysis workflows are compatible with your favorite data science tools and libraries
  • +
  • RAPIDS analysis workflows are reproducible and we encourage you to publish them along with your research papers
  • +
+
+

Why should I integrate my analysis in RAPIDS?

+

Even though the bulk of RAPIDS current functionality is related to the computation of behavioral features, we recommend RAPIDS as a complementary tool to create a mobile data analysis workflow. This is because the cookiecutter data science file organization guidelines, the use of Snakemake, the provided behavioral features, and the reproducible R and Python development environments allow researchers to divide an analysis workflow into small parts that can be audited, shared in an online repository, reproduced in other computers, and understood by other people as they follow a familiar and consistent structure. We believe these advantages outweigh the time needed to learn how to create these workflows in RAPIDS.

+

We clarify that to create analysis workflows in RAPIDS, researchers can still use any data manipulation tools, editors, libraries or languages they are already familiar with. RAPIDS is meant to be the final destination of analysis code that was developed in interactive notebooks or stand-alone scripts. For example, a user can compute call and location features using RAPIDS, then, they can use Jupyter notebooks to explore feature cleaning approaches and once the cleaning code is final, it can be moved to RAPIDS as a new step in the pipeline. In turn, the output of this cleaning step can be used to explore machine learning models and once a model is finished, it can also be transferred to RAPIDS as a step of its own. The idea is that when it is time to publish a piece of research, a RAPIDS workflow can be shared in a public repository as is.

+

In the following sections we share an example of how we structured an analysis workflow in RAPIDS.

+

Analysis workflow structure

+

To accurately reflect the complexity of a real-world modeling scenario, we decided not to oversimplify this example. Importantly, every step in this example follows a basic structure: an input file and parameters are manipulated by an R or Python script that saves the results to an output file. Input files, parameters, output files and scripts are grouped into Snakemake rules that are described on smk files in the rules folder (we point the reader to the relevant rule(s) of each step).

+

Researchers can use these rules and scripts as a guide to create their own as it is expected every modeling project will have different requirements, data and goals but ultimately most follow a similar chained pattern.

+
+

Hint

+

The example’s config file is example_profile/example_config.yaml and its Snakefile is in example_profile/Snakefile. The config file is already configured to process the sensor data as explained in Analysis workflow modules.

+
+

Description of the study modeled in our analysis workflow example

+

Our example is based on a hypothetical study that recruited 2 participants that underwent surgery and collected mobile data for at least one week before and one week after the procedure. Participants wore a Fitbit device and installed the AWARE client in their personal Android and iOS smartphones to collect mobile data 24/7. In addition, participants completed daily severity ratings of 12 common symptoms on a scale from 0 to 10 that we summed up into a daily symptom burden score.

+

The goal of this workflow is to find out if we can predict the daily symptom burden score of a participant. Thus, we framed this question as a binary classification problem with two classes, high and low symptom burden based on the scores above and below average of each participant. We also want to compare the performance of individual (personalized) models vs a population model.

+

In total, our example workflow has nine steps that are in charge of sensor data preprocessing, feature extraction, feature cleaning, machine learning model training and model evaluation (see figure below). We ship this workflow with RAPIDS and share files with test data in an Open Science Framework repository.

+
+ +
Modules of RAPIDS example workflow, from raw data to model evaluation
+
+ +

Configure and run the analysis workflow example

+
    +
  1. Install RAPIDS
  2. +
  3. Unzip the CSV files inside rapids_example_csv.zip in data/external/example_workflow/*.csv.
  4. +
  5. Create the participant files for this example by running: +
    ./rapids -j1 create_example_participant_files
    +
  6. +
  7. Run the example pipeline with: +
    ./rapids -j1 --profile example_profile
    +
  8. +
+

Note you will see a lot of warning messages, you can ignore them since they happen because we ran ML algorithms with a small fake dataset.

+

Modules of our analysis workflow example

+
1. Feature extraction

We extract daily behavioral features for data yield, received and sent messages, missed, incoming and outgoing calls, resample fused location data using Doryab provider, activity recognition, battery, Bluetooth, screen, light, applications foreground, conversations, Wi-Fi connected, Wi-Fi visible, Fitbit heart rate summary and intraday data, Fitbit sleep summary data, and Fitbit step summary and intraday data without excluding sleep periods with an active bout threshold of 10 steps. In total, we obtained 237 daily sensor features over 12 days per participant.

+
+
2. Extract demographic data.

It is common to have demographic data in addition to mobile and target (ground truth) data. In this example we include participants’ age, gender and the number of days they spent in hospital after their surgery as features in our model. We extract these three columns from the data/external/example_workflow/participant_info.csv file. As these three features remain the same within participants, they are used only on the population model. Refer to the demographic_features rule in rules/models.smk.

+
+
3. Create target labels.

The two classes for our machine learning binary classification problem are high and low symptom burden. Target values are already stored in the data/external/example_workflow/participant_target.csv file. A new rule/script can be created if further manipulation is necessary. Refer to the parse_targets rule in rules/models.smk.

+
+
4. Feature merging.

These daily features are stored on a CSV file per sensor, a CSV file per participant, and a CSV file including all features from all participants (in every case each column represents a feature and each row represents a day). Refer to the merge_sensor_features_for_individual_participants and merge_sensor_features_for_all_participants rules in rules/features.smk.

+
+
5. Data visualization.

At this point the user can use the five plots RAPIDS provides (or implement new ones) to explore and understand the quality of the raw data and extracted features and decide what sensors, days, or participants to include and exclude. Refer to rules/reports.smk to find the rules that generate these plots.

+
+
6. Feature cleaning.

In this stage we perform four steps to clean our sensor feature file. First, we discard days with a data yield hour ratio less than or equal to 0.75, i.e. we include days with at least 18 hours of data. Second, we drop columns (features) with more than 30% of missing rows. Third, we drop columns with zero variance. Fourth, we drop rows (days) with more than 30% of missing columns (features). In this cleaning stage several parameters are created and exposed in example_profile/example_config.yaml.

+

After this step, we kept 158 features over 11 days for the individual model of p01, 101 features over 12 days for the individual model of p02 and 106 features over 20 days for the population model. Note that the difference in the number of features between p01 and p02 is mostly due to iOS restrictions that stop researchers from collecting the same number of sensors as in Android phones.

+

Feature cleaning for the individual models is done in the clean_sensor_features_for_individual_participants rule and for the population model in the clean_sensor_features_for_all_participants rule in rules/models.smk.

+
+
7. Merge features and targets.

In this step we merge the cleaned features and target labels for our individual models in the merge_features_and_targets_for_individual_model rule in rules/models.smk. Additionally, we merge the cleaned features, target labels, and demographic features of our two participants for the population model in the merge_features_and_targets_for_population_model rule in rules/models.smk. These two merged files are the input for our individual and population models.

+
+
8. Modelling.

This stage has three phases: model building, training and evaluation.

+

In the building phase we impute, normalize and oversample our dataset. Missing numeric values in each column are imputed with their mean and we impute missing categorical values with their mode. We normalize each numeric column with one of three strategies (min-max, z-score, and scikit-learn package’s robust scaler) and we one-hot encode each categorial feature as a numerical array. We oversample our imbalanced dataset using SMOTE (Synthetic Minority Over-sampling Technique) or a Random Over sampler from scikit-learn. All these parameters are exposed in example_profile/example_config.yaml.

+

In the training phase, we create eight models: logistic regression, k-nearest neighbors, support vector machine, decision tree, random forest, gradient boosting classifier, extreme gradient boosting classifier and a light gradient boosting machine. We cross-validate each model with an inner cycle to tune hyper-parameters based on the Macro F1 score and an outer cycle to predict the test set on a model with the best hyper-parameters. Both cross-validation cycles use a leave-one-out strategy. Parameters for each model like weights and learning rates are exposed in example_profile/example_config.yaml.

+

Finally, in the evaluation phase we compute the accuracy, Macro F1, kappa, area under the curve and per class precision, recall and F1 score of all folds of the outer cross-validation cycle.

+

Refer to the modelling_for_individual_participants rule for the individual modeling and to the modelling_for_all_participants rule for the population modeling, both in rules/models.smk.

+
+
9. Compute model baselines.

We create three baselines to evaluate our classification models.

+

First, a majority classifier that labels each test sample with the majority class of our training data. Second, a random weighted classifier that predicts each test observation sampling at random from a binomial distribution based on the ratio of our target labels. Third, a decision tree classifier based solely on the demographic features of each participant. As we do not have demographic features for the individual models, this baseline is only available for the population model.

+

Our baseline metrics (e.g. accuracy, precision, etc.) are saved into a CSV file, ready to be compared to our modeling results. Refer to the baselines_for_individual_model rule for the individual model baselines and to the baselines_for_population_model rule for population model baselines, both in rules/models.smk.

+
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/1.3/workflow-examples/minimal/index.html b/1.3/workflow-examples/minimal/index.html new file mode 100644 index 00000000..383742a5 --- /dev/null +++ b/1.3/workflow-examples/minimal/index.html @@ -0,0 +1,2047 @@ + + + + + + + + + + + + + + + + + + + + + + Minimal Example - RAPIDS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + +
+
+
+ + +
+
+ + + + + + + + +

Minimal Working Example

+

This is a quick guide for creating and running a simple pipeline to extract missing, outgoing, and incoming call features for 24 hr (00:00:00 to 23:59:59) and night (00:00:00 to 05:59:59) time segments of every day of data of one participant that was monitored on the US East coast with an Android smartphone.

+
    +
  1. Install RAPIDS and make sure your conda environment is active (see Installation)
  2. +
  3. Download this CSV file and save it as data/external/aware_csv/calls.csv
  4. +
  5. +

    Make the changes listed below for the corresponding Configuration step (we provide an example of what the relevant sections in your config.yml will look like after you are done)

    +
    Required configuration changes (click to expand)
      +
    1. +

      Supported data streams.

      +

      Based on the docs, we decided to use the aware_csv data stream because we are processing aware data saved in a CSV file. We will use this label in a later step; there’s no need to type it or save it anywhere yet.

      +
    2. +
    3. +

      Create your participants file.

      +

      Since we are processing data from a single participant, you only need to create a single participant file called p01.yaml in data/external/participant_files. This participant file only has a PHONE section because this hypothetical participant was only monitored with a smartphone. Note that for a real analysis, you can do this automatically with a CSV file

      +
        +
      1. +

        Add p01 to [PIDS] in config.yaml

        +
      2. +
      3. +

        Create a file in data/external/participant_files/p01.yaml with the following content:

        +
        PHONE:
        +    DEVICE_IDS: [a748ee1a-1d0b-4ae9-9074-279a2b6ba524] # the participant's AWARE device id
        +    PLATFORMS: [android] # or ios
        +    LABEL: MyTestP01 # any string
        +    START_DATE: 2020-01-01 # this can also be empty
        +    END_DATE: 2021-01-01 # this can also be empty
        +
        +
      4. +
      +
    4. +
    5. +

      Select what time segments you want to extract features on.

      +
        +
      1. +

        Set [TIME_SEGMENTS][FILE] to data/external/timesegments_periodic.csv

        +
      2. +
      3. +

        Create a file in data/external/timesegments_periodic.csv with the following content

        +
        label,start_time,length,repeats_on,repeats_value
        +daily,00:00:00,23H 59M 59S,every_day,0
        +night,00:00:00,5H 59M 59S,every_day,0
        +
        +
      4. +
      +
    6. +
    7. +

      Choose the timezone of your study.

      +

      We will use the default time zone settings since this example is processing data collected on the US East Coast (America/New_York)

      +
      TIMEZONE: 
      +    TYPE: SINGLE
      +    SINGLE:
      +        TZCODE: America/New_York
      +
      +
    8. +
    9. +

      Modify your device data stream configuration

      +
        +
      1. +

        Set [PHONE_DATA_STREAMS][USE] to aware_csv.

        +
      2. +
      3. +

        We will use the default value for [PHONE_DATA_STREAMS][aware_csv][FOLDER] since we already stored the test calls CSV file there.

        +
      4. +
      +
    10. +
    11. +

      Select what sensors and features you want to process.

      +
        +
      1. +

        Set [PHONE_CALLS][CONTAINER] to calls.csv in the config.yaml file.

        +
      2. +
      3. +

        Set [PHONE_CALLS][PROVIDERS][RAPIDS][COMPUTE] to True in the config.yaml file.

        +
      4. +
      +
    12. +
    +
    +
    +

    Example of the config.yaml sections after the changes outlined above

    +

    This will be your config.yaml after following the instructions above. Click on the numbered markers to know more.

    +
    PIDS: [p01] # (1)
    +
    +TIMEZONE:
    +    TYPE: SINGLE # (2)
    +    SINGLE:
    +        TZCODE: America/New_York
    +
    +# ... other irrelevant sections
    +
    +TIME_SEGMENTS: &time_segments
    +    TYPE: PERIODIC # (3)
    +    FILE: "data/external/timesegments_periodic.csv" # (4)
    +    INCLUDE_PAST_PERIODIC_SEGMENTS: FALSE
    +
    +PHONE_DATA_STREAMS:
    +    USE: aware_csv # (5)
    +
    +    aware_csv:
    +        FOLDER: data/external/aware_csv # (6)
    +
    +# ... other irrelevant sections
    +
    +############## PHONE ###########################################################
    +################################################################################
    +
    +# ... other irrelevant sections
    +
    +# Communication call features config, TYPES and FEATURES keys need to match
    +PHONE_CALLS:
    +    CONTAINER: calls.csv  # (7) 
    +    PROVIDERS:
    +        RAPIDS:
    +            COMPUTE: True # (8)
    +            CALL_TYPES: ...
    +
    +
      +
    1. +

      We added p01 to PIDS after creating the participant file: +

      data/external/participant_files/p01.yaml
      +

      +

      With the following content: +

      PHONE:
      +    DEVICE_IDS: [a748ee1a-1d0b-4ae9-9074-279a2b6ba524] # the participant's AWARE device id
      +    PLATFORMS: [android] # or ios
      +    LABEL: MyTestP01 # any string
      +    START_DATE: 2020-01-01 # this can also be empty
      +    END_DATE: 2021-01-01 # this can also be empty
      +

      +
    2. +
    3. +

      We use the default SINGLE time zone.

      +
    4. +
    5. +

      We use the default PERIODIC time segment [TYPE]

      +
    6. +
    7. +

      We created this time segments file with these lines:

      +
      label,start_time,length,repeats_on,repeats_value
      +daily,00:00:00,23H 59M 59S,every_day,0
      +night,00:00:00,5H 59M 59S,every_day,0
      +
      +
    8. +
    9. +

      We set [USE] to aware_csv to tell RAPIDS to process sensor data collected with the AWARE Framework stored in CSV files.

      +
    10. +
    11. +

      We used the default [FOLDER] for aware_csv since we already stored our test calls.csv file there

      +
    12. +
    13. +

      We changed [CONTAINER] to calls.csv to process our test call data.

      +
    14. +
    15. +

      We flipped [COMPUTE] to True to extract call behavioral features using the RAPIDS feature provider.

      +
    16. +
    +
    +
  6. +
  7. +

    Run RAPIDS +

    ./rapids -j1
    +

    +
  8. +
  9. The call features for daily and night time segments will be in +
    data/processed/features/all_participants/all_sensor_features.csv
    +
  10. +
+ + + + + + +

Comments

+ + + + + + +
+ +
+
+ +
+ + + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/latest/404.html b/latest/404.html index 8b58fa6d..8aa2a4aa 100644 --- a/latest/404.html +++ b/latest/404.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../1.2/404.html... + Redirecting to ../1.3/404.html... \ No newline at end of file diff --git a/latest/change-log/index.html b/latest/change-log/index.html index 3891acbf..3f7c3f73 100644 --- a/latest/change-log/index.html +++ b/latest/change-log/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/change-log/... + Redirecting to ../../1.3/change-log/... \ No newline at end of file diff --git a/latest/citation/index.html b/latest/citation/index.html index ca81f88a..30f47858 100644 --- a/latest/citation/index.html +++ b/latest/citation/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/citation/... + Redirecting to ../../1.3/citation/... \ No newline at end of file diff --git a/latest/code_of_conduct/index.html b/latest/code_of_conduct/index.html index 8e47f44f..c0d64ae3 100644 --- a/latest/code_of_conduct/index.html +++ b/latest/code_of_conduct/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/code_of_conduct/... + Redirecting to ../../1.3/code_of_conduct/... \ No newline at end of file diff --git a/latest/common-errors/index.html b/latest/common-errors/index.html index 367a8a8b..f2000549 100644 --- a/latest/common-errors/index.html +++ b/latest/common-errors/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/common-errors/... + Redirecting to ../../1.3/common-errors/... \ No newline at end of file diff --git a/latest/contributing/index.html b/latest/contributing/index.html index aaf44eb4..b6995ca6 100644 --- a/latest/contributing/index.html +++ b/latest/contributing/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/contributing/... + Redirecting to ../../1.3/contributing/... 
\ No newline at end of file diff --git a/latest/datastreams/add-new-data-streams/index.html b/latest/datastreams/add-new-data-streams/index.html index 4f849693..794e4776 100644 --- a/latest/datastreams/add-new-data-streams/index.html +++ b/latest/datastreams/add-new-data-streams/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/add-new-data-streams/... + Redirecting to ../../../1.3/datastreams/add-new-data-streams/... \ No newline at end of file diff --git a/latest/datastreams/aware-csv/index.html b/latest/datastreams/aware-csv/index.html index 1a918cd7..3243aaea 100644 --- a/latest/datastreams/aware-csv/index.html +++ b/latest/datastreams/aware-csv/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/aware-csv/... + Redirecting to ../../../1.3/datastreams/aware-csv/... \ No newline at end of file diff --git a/latest/datastreams/aware-influxdb/index.html b/latest/datastreams/aware-influxdb/index.html index 2ce9f0a6..d2e99246 100644 --- a/latest/datastreams/aware-influxdb/index.html +++ b/latest/datastreams/aware-influxdb/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/aware-influxdb/... + Redirecting to ../../../1.3/datastreams/aware-influxdb/... \ No newline at end of file diff --git a/latest/datastreams/aware-mysql/index.html b/latest/datastreams/aware-mysql/index.html index 7d58f72f..3c4877a7 100644 --- a/latest/datastreams/aware-mysql/index.html +++ b/latest/datastreams/aware-mysql/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/aware-mysql/... + Redirecting to ../../../1.3/datastreams/aware-mysql/... 
\ No newline at end of file diff --git a/latest/datastreams/data-streams-introduction/index.html b/latest/datastreams/data-streams-introduction/index.html index 8bbe603d..72e0a871 100644 --- a/latest/datastreams/data-streams-introduction/index.html +++ b/latest/datastreams/data-streams-introduction/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/data-streams-introduction/... + Redirecting to ../../../1.3/datastreams/data-streams-introduction/... \ No newline at end of file diff --git a/latest/datastreams/empatica-zip/index.html b/latest/datastreams/empatica-zip/index.html index 06ca994c..d4a81355 100644 --- a/latest/datastreams/empatica-zip/index.html +++ b/latest/datastreams/empatica-zip/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/empatica-zip/... + Redirecting to ../../../1.3/datastreams/empatica-zip/... \ No newline at end of file diff --git a/latest/datastreams/fitbitjson-csv/index.html b/latest/datastreams/fitbitjson-csv/index.html index 2fa26d26..99526c1b 100644 --- a/latest/datastreams/fitbitjson-csv/index.html +++ b/latest/datastreams/fitbitjson-csv/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/fitbitjson-csv/... + Redirecting to ../../../1.3/datastreams/fitbitjson-csv/... \ No newline at end of file diff --git a/latest/datastreams/fitbitjson-mysql/index.html b/latest/datastreams/fitbitjson-mysql/index.html index c5442398..34a72c85 100644 --- a/latest/datastreams/fitbitjson-mysql/index.html +++ b/latest/datastreams/fitbitjson-mysql/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/fitbitjson-mysql/... + Redirecting to ../../../1.3/datastreams/fitbitjson-mysql/... 
\ No newline at end of file diff --git a/latest/datastreams/fitbitparsed-csv/index.html b/latest/datastreams/fitbitparsed-csv/index.html index 9d052dda..2418b93c 100644 --- a/latest/datastreams/fitbitparsed-csv/index.html +++ b/latest/datastreams/fitbitparsed-csv/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/fitbitparsed-csv/... + Redirecting to ../../../1.3/datastreams/fitbitparsed-csv/... \ No newline at end of file diff --git a/latest/datastreams/fitbitparsed-mysql/index.html b/latest/datastreams/fitbitparsed-mysql/index.html index 01a84c46..acf26682 100644 --- a/latest/datastreams/fitbitparsed-mysql/index.html +++ b/latest/datastreams/fitbitparsed-mysql/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/fitbitparsed-mysql/... + Redirecting to ../../../1.3/datastreams/fitbitparsed-mysql/... \ No newline at end of file diff --git a/latest/datastreams/mandatory-empatica-format/index.html b/latest/datastreams/mandatory-empatica-format/index.html index 32f85251..a38f9543 100644 --- a/latest/datastreams/mandatory-empatica-format/index.html +++ b/latest/datastreams/mandatory-empatica-format/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/mandatory-empatica-format/... + Redirecting to ../../../1.3/datastreams/mandatory-empatica-format/... \ No newline at end of file diff --git a/latest/datastreams/mandatory-fitbit-format/index.html b/latest/datastreams/mandatory-fitbit-format/index.html index 0f930605..fd20b4a7 100644 --- a/latest/datastreams/mandatory-fitbit-format/index.html +++ b/latest/datastreams/mandatory-fitbit-format/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/mandatory-fitbit-format/... + Redirecting to ../../../1.3/datastreams/mandatory-fitbit-format/... 
\ No newline at end of file diff --git a/latest/datastreams/mandatory-phone-format/index.html b/latest/datastreams/mandatory-phone-format/index.html index c5c3a668..2bbb4b65 100644 --- a/latest/datastreams/mandatory-phone-format/index.html +++ b/latest/datastreams/mandatory-phone-format/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/datastreams/mandatory-phone-format/... + Redirecting to ../../../1.3/datastreams/mandatory-phone-format/... \ No newline at end of file diff --git a/latest/developers/documentation/index.html b/latest/developers/documentation/index.html index ecf1e12b..c6dd937b 100644 --- a/latest/developers/documentation/index.html +++ b/latest/developers/documentation/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/developers/documentation/... + Redirecting to ../../../1.3/developers/documentation/... \ No newline at end of file diff --git a/latest/developers/git-flow/index.html b/latest/developers/git-flow/index.html index 2069c9c3..cf3369ce 100644 --- a/latest/developers/git-flow/index.html +++ b/latest/developers/git-flow/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/developers/git-flow/... + Redirecting to ../../../1.3/developers/git-flow/... \ No newline at end of file diff --git a/latest/developers/remote-support/index.html b/latest/developers/remote-support/index.html index 3b619dbd..ba7b2e38 100644 --- a/latest/developers/remote-support/index.html +++ b/latest/developers/remote-support/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/developers/remote-support/... + Redirecting to ../../../1.3/developers/remote-support/... \ No newline at end of file diff --git a/latest/developers/test-cases/index.html b/latest/developers/test-cases/index.html index d9406dcb..95b3cc8f 100644 --- a/latest/developers/test-cases/index.html +++ b/latest/developers/test-cases/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/developers/test-cases/... 
+ Redirecting to ../../../1.3/developers/test-cases/... \ No newline at end of file diff --git a/latest/developers/testing/index.html b/latest/developers/testing/index.html index 2f557817..bf8aea94 100644 --- a/latest/developers/testing/index.html +++ b/latest/developers/testing/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/developers/testing/... + Redirecting to ../../../1.3/developers/testing/... \ No newline at end of file diff --git a/latest/developers/validation-schema-config/index.html b/latest/developers/validation-schema-config/index.html index 06443d9a..9ea79a90 100644 --- a/latest/developers/validation-schema-config/index.html +++ b/latest/developers/validation-schema-config/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/developers/validation-schema-config/... + Redirecting to ../../../1.3/developers/validation-schema-config/... \ No newline at end of file diff --git a/latest/developers/virtual-environments/index.html b/latest/developers/virtual-environments/index.html index 90dd9b12..cc2c7e34 100644 --- a/latest/developers/virtual-environments/index.html +++ b/latest/developers/virtual-environments/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/developers/virtual-environments/... + Redirecting to ../../../1.3/developers/virtual-environments/... \ No newline at end of file diff --git a/latest/features/add-new-features/index.html b/latest/features/add-new-features/index.html index da44f60d..d34617af 100644 --- a/latest/features/add-new-features/index.html +++ b/latest/features/add-new-features/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/add-new-features/... + Redirecting to ../../../1.3/features/add-new-features/... 
\ No newline at end of file diff --git a/latest/features/empatica-accelerometer/index.html b/latest/features/empatica-accelerometer/index.html index a9e6222e..4fe7e374 100644 --- a/latest/features/empatica-accelerometer/index.html +++ b/latest/features/empatica-accelerometer/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/empatica-accelerometer/... + Redirecting to ../../../1.3/features/empatica-accelerometer/... \ No newline at end of file diff --git a/latest/features/empatica-blood-volume-pulse/index.html b/latest/features/empatica-blood-volume-pulse/index.html index 9e279db2..af12560b 100644 --- a/latest/features/empatica-blood-volume-pulse/index.html +++ b/latest/features/empatica-blood-volume-pulse/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/empatica-blood-volume-pulse/... + Redirecting to ../../../1.3/features/empatica-blood-volume-pulse/... \ No newline at end of file diff --git a/latest/features/empatica-electrodermal-activity/index.html b/latest/features/empatica-electrodermal-activity/index.html index 686c99bf..6d0fb331 100644 --- a/latest/features/empatica-electrodermal-activity/index.html +++ b/latest/features/empatica-electrodermal-activity/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/empatica-electrodermal-activity/... + Redirecting to ../../../1.3/features/empatica-electrodermal-activity/... \ No newline at end of file diff --git a/latest/features/empatica-heartrate/index.html b/latest/features/empatica-heartrate/index.html index 0685c230..02d0d255 100644 --- a/latest/features/empatica-heartrate/index.html +++ b/latest/features/empatica-heartrate/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/empatica-heartrate/... + Redirecting to ../../../1.3/features/empatica-heartrate/... 
\ No newline at end of file diff --git a/latest/features/empatica-inter-beat-interval/index.html b/latest/features/empatica-inter-beat-interval/index.html index 340973d9..0294cc55 100644 --- a/latest/features/empatica-inter-beat-interval/index.html +++ b/latest/features/empatica-inter-beat-interval/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/empatica-inter-beat-interval/... + Redirecting to ../../../1.3/features/empatica-inter-beat-interval/... \ No newline at end of file diff --git a/latest/features/empatica-tags/index.html b/latest/features/empatica-tags/index.html index a10ac0e6..0698356d 100644 --- a/latest/features/empatica-tags/index.html +++ b/latest/features/empatica-tags/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/empatica-tags/... + Redirecting to ../../../1.3/features/empatica-tags/... \ No newline at end of file diff --git a/latest/features/empatica-temperature/index.html b/latest/features/empatica-temperature/index.html index fd6ed0a9..bb8c7126 100644 --- a/latest/features/empatica-temperature/index.html +++ b/latest/features/empatica-temperature/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/empatica-temperature/... + Redirecting to ../../../1.3/features/empatica-temperature/... \ No newline at end of file diff --git a/latest/features/feature-introduction/index.html b/latest/features/feature-introduction/index.html index 2652e9e4..077093be 100644 --- a/latest/features/feature-introduction/index.html +++ b/latest/features/feature-introduction/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/feature-introduction/... + Redirecting to ../../../1.3/features/feature-introduction/... 
\ No newline at end of file diff --git a/latest/features/fitbit-calories-intraday/index.html b/latest/features/fitbit-calories-intraday/index.html index 180c0875..1b4e85bb 100644 --- a/latest/features/fitbit-calories-intraday/index.html +++ b/latest/features/fitbit-calories-intraday/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/fitbit-calories-intraday/... + Redirecting to ../../../1.3/features/fitbit-calories-intraday/... \ No newline at end of file diff --git a/latest/features/fitbit-data-yield/index.html b/latest/features/fitbit-data-yield/index.html index c669689b..4b49c514 100644 --- a/latest/features/fitbit-data-yield/index.html +++ b/latest/features/fitbit-data-yield/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/fitbit-data-yield/... + Redirecting to ../../../1.3/features/fitbit-data-yield/... \ No newline at end of file diff --git a/latest/features/fitbit-heartrate-intraday/index.html b/latest/features/fitbit-heartrate-intraday/index.html index fb762352..1c7c17e8 100644 --- a/latest/features/fitbit-heartrate-intraday/index.html +++ b/latest/features/fitbit-heartrate-intraday/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/fitbit-heartrate-intraday/... + Redirecting to ../../../1.3/features/fitbit-heartrate-intraday/... \ No newline at end of file diff --git a/latest/features/fitbit-heartrate-summary/index.html b/latest/features/fitbit-heartrate-summary/index.html index 9c246c09..3eea638c 100644 --- a/latest/features/fitbit-heartrate-summary/index.html +++ b/latest/features/fitbit-heartrate-summary/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/fitbit-heartrate-summary/... + Redirecting to ../../../1.3/features/fitbit-heartrate-summary/... 
\ No newline at end of file diff --git a/latest/features/fitbit-sleep-intraday/index.html b/latest/features/fitbit-sleep-intraday/index.html index 990ec45a..fae035da 100644 --- a/latest/features/fitbit-sleep-intraday/index.html +++ b/latest/features/fitbit-sleep-intraday/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/fitbit-sleep-intraday/... + Redirecting to ../../../1.3/features/fitbit-sleep-intraday/... \ No newline at end of file diff --git a/latest/features/fitbit-sleep-summary/index.html b/latest/features/fitbit-sleep-summary/index.html index 4e5d698e..51d298d8 100644 --- a/latest/features/fitbit-sleep-summary/index.html +++ b/latest/features/fitbit-sleep-summary/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/fitbit-sleep-summary/... + Redirecting to ../../../1.3/features/fitbit-sleep-summary/... \ No newline at end of file diff --git a/latest/features/fitbit-steps-intraday/index.html b/latest/features/fitbit-steps-intraday/index.html index fbac9a80..ecc86c50 100644 --- a/latest/features/fitbit-steps-intraday/index.html +++ b/latest/features/fitbit-steps-intraday/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/fitbit-steps-intraday/... + Redirecting to ../../../1.3/features/fitbit-steps-intraday/... \ No newline at end of file diff --git a/latest/features/fitbit-steps-summary/index.html b/latest/features/fitbit-steps-summary/index.html index 09b00ee9..7ba01152 100644 --- a/latest/features/fitbit-steps-summary/index.html +++ b/latest/features/fitbit-steps-summary/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/fitbit-steps-summary/... + Redirecting to ../../../1.3/features/fitbit-steps-summary/... 
\ No newline at end of file diff --git a/latest/features/phone-accelerometer/index.html b/latest/features/phone-accelerometer/index.html index 76ea4859..aca6de2a 100644 --- a/latest/features/phone-accelerometer/index.html +++ b/latest/features/phone-accelerometer/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-accelerometer/... + Redirecting to ../../../1.3/features/phone-accelerometer/... \ No newline at end of file diff --git a/latest/features/phone-activity-recognition/index.html b/latest/features/phone-activity-recognition/index.html index 14a09a2c..6699c0ae 100644 --- a/latest/features/phone-activity-recognition/index.html +++ b/latest/features/phone-activity-recognition/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-activity-recognition/... + Redirecting to ../../../1.3/features/phone-activity-recognition/... \ No newline at end of file diff --git a/latest/features/phone-applications-crashes/index.html b/latest/features/phone-applications-crashes/index.html index 4918ce7f..2fb58dec 100644 --- a/latest/features/phone-applications-crashes/index.html +++ b/latest/features/phone-applications-crashes/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-applications-crashes/... + Redirecting to ../../../1.3/features/phone-applications-crashes/... \ No newline at end of file diff --git a/latest/features/phone-applications-foreground/index.html b/latest/features/phone-applications-foreground/index.html index 0c7eb7f2..19e81af9 100644 --- a/latest/features/phone-applications-foreground/index.html +++ b/latest/features/phone-applications-foreground/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-applications-foreground/... + Redirecting to ../../../1.3/features/phone-applications-foreground/... 
\ No newline at end of file diff --git a/latest/features/phone-applications-notifications/index.html b/latest/features/phone-applications-notifications/index.html index afe09dcd..36b09161 100644 --- a/latest/features/phone-applications-notifications/index.html +++ b/latest/features/phone-applications-notifications/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-applications-notifications/... + Redirecting to ../../../1.3/features/phone-applications-notifications/... \ No newline at end of file diff --git a/latest/features/phone-battery/index.html b/latest/features/phone-battery/index.html index 47f7851e..f8dd42ef 100644 --- a/latest/features/phone-battery/index.html +++ b/latest/features/phone-battery/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-battery/... + Redirecting to ../../../1.3/features/phone-battery/... \ No newline at end of file diff --git a/latest/features/phone-bluetooth/index.html b/latest/features/phone-bluetooth/index.html index 18869307..8b82314d 100644 --- a/latest/features/phone-bluetooth/index.html +++ b/latest/features/phone-bluetooth/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-bluetooth/... + Redirecting to ../../../1.3/features/phone-bluetooth/... \ No newline at end of file diff --git a/latest/features/phone-calls/index.html b/latest/features/phone-calls/index.html index a52fb429..cc90fe56 100644 --- a/latest/features/phone-calls/index.html +++ b/latest/features/phone-calls/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-calls/... + Redirecting to ../../../1.3/features/phone-calls/... 
\ No newline at end of file diff --git a/latest/features/phone-conversation/index.html b/latest/features/phone-conversation/index.html index 270d415e..5e3aba6a 100644 --- a/latest/features/phone-conversation/index.html +++ b/latest/features/phone-conversation/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-conversation/... + Redirecting to ../../../1.3/features/phone-conversation/... \ No newline at end of file diff --git a/latest/features/phone-data-yield/index.html b/latest/features/phone-data-yield/index.html index c9c5d138..0e78655f 100644 --- a/latest/features/phone-data-yield/index.html +++ b/latest/features/phone-data-yield/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-data-yield/... + Redirecting to ../../../1.3/features/phone-data-yield/... \ No newline at end of file diff --git a/latest/features/phone-keyboard/index.html b/latest/features/phone-keyboard/index.html index 4372e1e2..112478e2 100644 --- a/latest/features/phone-keyboard/index.html +++ b/latest/features/phone-keyboard/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-keyboard/... + Redirecting to ../../../1.3/features/phone-keyboard/... \ No newline at end of file diff --git a/latest/features/phone-light/index.html b/latest/features/phone-light/index.html index 3d60d181..87474c4b 100644 --- a/latest/features/phone-light/index.html +++ b/latest/features/phone-light/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-light/... + Redirecting to ../../../1.3/features/phone-light/... \ No newline at end of file diff --git a/latest/features/phone-locations/index.html b/latest/features/phone-locations/index.html index 47587c53..64b2dff2 100644 --- a/latest/features/phone-locations/index.html +++ b/latest/features/phone-locations/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-locations/... 
+ Redirecting to ../../../1.3/features/phone-locations/... \ No newline at end of file diff --git a/latest/features/phone-log/index.html b/latest/features/phone-log/index.html index ba492971..e9b63ce8 100644 --- a/latest/features/phone-log/index.html +++ b/latest/features/phone-log/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-log/... + Redirecting to ../../../1.3/features/phone-log/... \ No newline at end of file diff --git a/latest/features/phone-messages/index.html b/latest/features/phone-messages/index.html index 42a00ed7..8dfd1791 100644 --- a/latest/features/phone-messages/index.html +++ b/latest/features/phone-messages/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-messages/... + Redirecting to ../../../1.3/features/phone-messages/... \ No newline at end of file diff --git a/latest/features/phone-screen/index.html b/latest/features/phone-screen/index.html index d94b00ae..101d1220 100644 --- a/latest/features/phone-screen/index.html +++ b/latest/features/phone-screen/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-screen/... + Redirecting to ../../../1.3/features/phone-screen/... \ No newline at end of file diff --git a/latest/features/phone-wifi-connected/index.html b/latest/features/phone-wifi-connected/index.html index 914548ba..b4f3e3c9 100644 --- a/latest/features/phone-wifi-connected/index.html +++ b/latest/features/phone-wifi-connected/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-wifi-connected/... + Redirecting to ../../../1.3/features/phone-wifi-connected/... 
\ No newline at end of file diff --git a/latest/features/phone-wifi-visible/index.html b/latest/features/phone-wifi-visible/index.html index 378eef8a..d394a4f7 100644 --- a/latest/features/phone-wifi-visible/index.html +++ b/latest/features/phone-wifi-visible/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/features/phone-wifi-visible/... + Redirecting to ../../../1.3/features/phone-wifi-visible/... \ No newline at end of file diff --git a/latest/img/h-data-yield.html b/latest/img/h-data-yield.html index 0adc32cc..8e6831d0 100644 --- a/latest/img/h-data-yield.html +++ b/latest/img/h-data-yield.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/img/h-data-yield.html... + Redirecting to ../../1.3/img/h-data-yield.html... \ No newline at end of file diff --git a/latest/img/hm-data-yield-participants-absolute-time.html b/latest/img/hm-data-yield-participants-absolute-time.html index dd1e4e1a..724b7382 100644 --- a/latest/img/hm-data-yield-participants-absolute-time.html +++ b/latest/img/hm-data-yield-participants-absolute-time.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/img/hm-data-yield-participants-absolute-time.html... + Redirecting to ../../1.3/img/hm-data-yield-participants-absolute-time.html... \ No newline at end of file diff --git a/latest/img/hm-data-yield-participants-relative-time.html b/latest/img/hm-data-yield-participants-relative-time.html index f1658e0b..ed2341b5 100644 --- a/latest/img/hm-data-yield-participants-relative-time.html +++ b/latest/img/hm-data-yield-participants-relative-time.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/img/hm-data-yield-participants-relative-time.html... + Redirecting to ../../1.3/img/hm-data-yield-participants-relative-time.html... 
\ No newline at end of file diff --git a/latest/img/hm-data-yield-participants.html b/latest/img/hm-data-yield-participants.html index 9138d95b..51ba29e4 100644 --- a/latest/img/hm-data-yield-participants.html +++ b/latest/img/hm-data-yield-participants.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/img/hm-data-yield-participants.html... + Redirecting to ../../1.3/img/hm-data-yield-participants.html... \ No newline at end of file diff --git a/latest/img/hm-feature-correlations.html b/latest/img/hm-feature-correlations.html index 3c5cef19..490a60fc 100644 --- a/latest/img/hm-feature-correlations.html +++ b/latest/img/hm-feature-correlations.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/img/hm-feature-correlations.html... + Redirecting to ../../1.3/img/hm-feature-correlations.html... \ No newline at end of file diff --git a/latest/img/hm-phone-sensors.html b/latest/img/hm-phone-sensors.html index 198f6f80..383fefa1 100644 --- a/latest/img/hm-phone-sensors.html +++ b/latest/img/hm-phone-sensors.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/img/hm-phone-sensors.html... + Redirecting to ../../1.3/img/hm-phone-sensors.html... \ No newline at end of file diff --git a/latest/img/hm-sensor-rows.html b/latest/img/hm-sensor-rows.html index 42b40eb5..c1909c19 100644 --- a/latest/img/hm-sensor-rows.html +++ b/latest/img/hm-sensor-rows.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/img/hm-sensor-rows.html... + Redirecting to ../../1.3/img/hm-sensor-rows.html... \ No newline at end of file diff --git a/latest/index.html b/latest/index.html index bbdf1e9a..249ba3d8 100644 --- a/latest/index.html +++ b/latest/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../1.2/... + Redirecting to ../1.3/... 
\ No newline at end of file diff --git a/latest/migrating-from-old-versions/index.html b/latest/migrating-from-old-versions/index.html index 86c77968..edf538b0 100644 --- a/latest/migrating-from-old-versions/index.html +++ b/latest/migrating-from-old-versions/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/migrating-from-old-versions/... + Redirecting to ../../1.3/migrating-from-old-versions/... \ No newline at end of file diff --git a/latest/overrides/main.html b/latest/overrides/main.html index 9b02e188..ff390e26 100644 --- a/latest/overrides/main.html +++ b/latest/overrides/main.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/overrides/main.html... + Redirecting to ../../1.3/overrides/main.html... \ No newline at end of file diff --git a/latest/overrides/partials/integrations/utterances.html b/latest/overrides/partials/integrations/utterances.html index 8f8f1a0a..0ac06fda 100644 --- a/latest/overrides/partials/integrations/utterances.html +++ b/latest/overrides/partials/integrations/utterances.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../../1.2/overrides/partials/integrations/utterances.html... + Redirecting to ../../../../1.3/overrides/partials/integrations/utterances.html... \ No newline at end of file diff --git a/latest/setup/configuration/index.html b/latest/setup/configuration/index.html index 66a52d94..80ef3d43 100644 --- a/latest/setup/configuration/index.html +++ b/latest/setup/configuration/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/setup/configuration/... + Redirecting to ../../../1.3/setup/configuration/... \ No newline at end of file diff --git a/latest/setup/execution/index.html b/latest/setup/execution/index.html index 8d4b2d40..d975cd70 100644 --- a/latest/setup/execution/index.html +++ b/latest/setup/execution/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/setup/execution/... + Redirecting to ../../../1.3/setup/execution/... 
\ No newline at end of file diff --git a/latest/setup/installation/index.html b/latest/setup/installation/index.html index e0fe0553..fa407e49 100644 --- a/latest/setup/installation/index.html +++ b/latest/setup/installation/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/setup/installation/... + Redirecting to ../../../1.3/setup/installation/... \ No newline at end of file diff --git a/latest/setup/overview/index.html b/latest/setup/overview/index.html index c49dd29f..c1fed0a9 100644 --- a/latest/setup/overview/index.html +++ b/latest/setup/overview/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/setup/overview/... + Redirecting to ../../../1.3/setup/overview/... \ No newline at end of file diff --git a/latest/snippets/aware_format/index.html b/latest/snippets/aware_format/index.html index 5a341fb5..6f78308d 100644 --- a/latest/snippets/aware_format/index.html +++ b/latest/snippets/aware_format/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/snippets/aware_format/... + Redirecting to ../../../1.3/snippets/aware_format/... \ No newline at end of file diff --git a/latest/snippets/database/index.html b/latest/snippets/database/index.html index a426498d..15c1fe8a 100644 --- a/latest/snippets/database/index.html +++ b/latest/snippets/database/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/snippets/database/... + Redirecting to ../../../1.3/snippets/database/... \ No newline at end of file diff --git a/latest/snippets/feature_introduction_example/index.html b/latest/snippets/feature_introduction_example/index.html index a8cb6f47..6b6f09bc 100644 --- a/latest/snippets/feature_introduction_example/index.html +++ b/latest/snippets/feature_introduction_example/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/snippets/feature_introduction_example/... + Redirecting to ../../../1.3/snippets/feature_introduction_example/... 
\ No newline at end of file diff --git a/latest/snippets/jsonfitbit_format/index.html b/latest/snippets/jsonfitbit_format/index.html index 4ef0f995..4e281b31 100644 --- a/latest/snippets/jsonfitbit_format/index.html +++ b/latest/snippets/jsonfitbit_format/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/snippets/jsonfitbit_format/... + Redirecting to ../../../1.3/snippets/jsonfitbit_format/... \ No newline at end of file diff --git a/latest/snippets/parsedfitbit_format/index.html b/latest/snippets/parsedfitbit_format/index.html index 7f80c686..fffe7bf0 100644 --- a/latest/snippets/parsedfitbit_format/index.html +++ b/latest/snippets/parsedfitbit_format/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/snippets/parsedfitbit_format/... + Redirecting to ../../../1.3/snippets/parsedfitbit_format/... \ No newline at end of file diff --git a/latest/team/index.html b/latest/team/index.html index b8dd1fbe..4c71fa97 100644 --- a/latest/team/index.html +++ b/latest/team/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../1.2/team/... + Redirecting to ../../1.3/team/... \ No newline at end of file diff --git a/latest/visualizations/data-quality-visualizations/index.html b/latest/visualizations/data-quality-visualizations/index.html index 1a25f2c3..9be3e583 100644 --- a/latest/visualizations/data-quality-visualizations/index.html +++ b/latest/visualizations/data-quality-visualizations/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/visualizations/data-quality-visualizations/... + Redirecting to ../../../1.3/visualizations/data-quality-visualizations/... 
\ No newline at end of file diff --git a/latest/visualizations/feature-visualizations/index.html b/latest/visualizations/feature-visualizations/index.html index bf93f7a0..49185b83 100644 --- a/latest/visualizations/feature-visualizations/index.html +++ b/latest/visualizations/feature-visualizations/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/visualizations/feature-visualizations/... + Redirecting to ../../../1.3/visualizations/feature-visualizations/... \ No newline at end of file diff --git a/latest/workflow-examples/analysis/index.html b/latest/workflow-examples/analysis/index.html index 984b0908..d5121d5f 100644 --- a/latest/workflow-examples/analysis/index.html +++ b/latest/workflow-examples/analysis/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/workflow-examples/analysis/... + Redirecting to ../../../1.3/workflow-examples/analysis/... \ No newline at end of file diff --git a/latest/workflow-examples/minimal/index.html b/latest/workflow-examples/minimal/index.html index 3213bcb7..8c15e825 100644 --- a/latest/workflow-examples/minimal/index.html +++ b/latest/workflow-examples/minimal/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../1.2/workflow-examples/minimal/... + Redirecting to ../../../1.3/workflow-examples/minimal/... 
\ No newline at end of file diff --git a/versions.json b/versions.json index 26bc0006..8f230024 100644 --- a/versions.json +++ b/versions.json @@ -1 +1 @@ -[{"version": "1.2", "title": "1.2", "aliases": ["latest"]}, {"version": "1.1", "title": "1.1", "aliases": []}, {"version": "1.0", "title": "1.0", "aliases": []}, {"version": "0.4", "title": "0.4", "aliases": []}, {"version": "0.3", "title": "0.3", "aliases": []}, {"version": "0.2", "title": "0.2", "aliases": []}, {"version": "0.1", "title": "0.1", "aliases": []}, {"version": "dev", "title": "dev", "aliases": []}] \ No newline at end of file +[{"version": "1.3", "title": "1.3", "aliases": ["latest"]}, {"version": "1.2", "title": "1.2", "aliases": []}, {"version": "1.1", "title": "1.1", "aliases": []}, {"version": "1.0", "title": "1.0", "aliases": []}, {"version": "0.4", "title": "0.4", "aliases": []}, {"version": "0.3", "title": "0.3", "aliases": []}, {"version": "0.2", "title": "0.2", "aliases": []}, {"version": "0.1", "title": "0.1", "aliases": []}, {"version": "dev", "title": "dev", "aliases": []}] \ No newline at end of file