idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
51,600
def controlPoints(cmd, data):
    """Return the indices within *data* that hold control-point coordinates.

    Cubic ('c') commands use the first four of every six values as control
    points; shorthand/quadratic ('s', 'q') use the first two of every four.
    Any other command has none.
    """
    command = cmd.lower()
    if command == 'c':
        return [i for i in range(len(data)) if i % 6 < 4]
    if command in ('s', 'q'):
        return [i for i in range(len(data)) if i % 4 < 2]
    return []
Returns the indices of the control-point values within the path data for the given command.
51,601
def flags(cmd, data):
    """Return the indices within *data* that hold arc-command flags.

    Elliptical-arc ('a'/'A') commands carry two boolean flags (large-arc,
    sweep) at positions 3 and 4 of each 7-value group; other commands have
    none.
    """
    if cmd.lower() != 'a':
        return []
    return [i for i in range(len(data)) if i % 7 in (3, 4)]
Returns the indices of the arc-flag values within the path data for the given command.
51,602
def serializePath(pathObj, options):
    """Reserialize the path data with some cleanups."""
    parts = []
    for cmd, data in pathObj:
        parts.append(cmd + scourCoordinates(data, options,
                                            control_points=controlPoints(cmd, data),
                                            flags=flags(cmd, data)))
    return ''.join(parts)
Reserializes the path data with some cleanups .
51,603
def serializeTransform(transformObj):
    """Reserialize the transform data with some cleanups."""
    serialized = []
    for command, numbers in transformObj:
        args = ' '.join(scourUnitlessLength(number) for number in numbers)
        serialized.append(command + '(' + args + ')')
    return ' '.join(serialized)
Reserializes the transform data with some cleanups .
51,604
def scourLength(length):
    """Scour a length value. Accepts units (e.g. 'px', 'em')."""
    parsed = SVGLength(length)
    return scourUnitlessLength(parsed.value) + Unit.str(parsed.units)
Scours a length . Accepts units .
51,605
def scourUnitlessLength(length, renderer_workaround=False, is_control_point=False):
    """Scour the numeric part of a length only. Does not accept units.

    Rounds ``length`` through the module's Decimal scouring context (a
    separate context is used for control points), strips the leading zero
    of '0.x'/'-0.x' forms unless ``renderer_workaround`` is set, and returns
    scientific notation instead when that spelling is shorter.
    """
    # Normalise the input to a Decimal in the default context.
    if not isinstance(length, Decimal):
        length = getcontext().create_decimal(str(length))
    initial_length = length
    # Apply the configured precision; `plus` rounds via the context.
    # Control points get their own context -- presumably a lower precision;
    # confirm against scouringContextC's definition.
    if is_control_point:
        length = scouringContextC.plus(length)
    else:
        length = scouringContext.plus(length)
    # Represent integral values without a fractional part.
    intLength = length.to_integral_value()
    if length == intLength:
        length = Decimal(intLength)
    else:
        length = length.normalize()
    # Quantize the ORIGINAL value to the rounded value's exponent, so the
    # emitted digits are derived from the unrounded input.
    nonsci = '{0:f}'.format(length)
    nonsci = '{0:f}'.format(initial_length.quantize(Decimal(nonsci)))
    if not renderer_workaround:
        # SVG allows '.5' / '-.5'; drop the redundant leading zero.
        if len(nonsci) > 2 and nonsci[:2] == '0.':
            nonsci = nonsci[1:]
        elif len(nonsci) > 3 and nonsci[:3] == '-0.':
            nonsci = '-' + nonsci[2:]
    return_value = nonsci
    # Prefer scientific notation when it is strictly shorter.
    if len(nonsci) > 3:
        exponent = length.adjusted()
        length = length.scaleb(-exponent).normalize()
        sci = six.text_type(length) + 'e' + six.text_type(exponent)
        if len(sci) < len(nonsci):
            return_value = sci
    return return_value
Scours the numeric part of a length only . Does not accept units .
51,606
def reducePrecision(element):
    """Reduce the numeric precision of length-like presentation attributes.

    Rewrites both XML attributes and inline style properties when the
    scoured form is shorter, then recurses into child elements. Returns the
    number of bytes saved.
    """
    saved = 0
    styles = _getStyle(element)
    for lengthAttr in ['opacity', 'flood-opacity', 'fill-opacity',
                       'stroke-opacity', 'stop-opacity', 'stroke-miterlimit',
                       'stroke-dashoffset', 'letter-spacing', 'word-spacing',
                       'kerning', 'font-size-adjust', 'font-size',
                       'stroke-width']:
        # Attribute form.
        val = element.getAttribute(lengthAttr)
        if val != '':
            if SVGLength(val).units != Unit.INVALID:
                newVal = scourLength(val)
                if len(newVal) < len(val):
                    saved += len(val) - len(newVal)
                    element.setAttribute(lengthAttr, newVal)
        # Inline-style form.
        if lengthAttr in styles:
            val = styles[lengthAttr]
            if SVGLength(val).units != Unit.INVALID:
                newVal = scourLength(val)
                if len(newVal) < len(val):
                    saved += len(val) - len(newVal)
                    styles[lengthAttr] = newVal
    _setStyle(element, styles)
    for child in element.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            saved += reducePrecision(child)
    return saved
Because opacities, letter spacings, stroke widths and all that don't need to be preserved in SVG files with 9 digits of precision.
51,607
def optimizeAngle(angle):
    """Normalize a rotation angle into the half-open range [-90, 270).

    Any rotation is equivalent modulo 360 degrees, and a negative angle is
    sometimes one character longer than its positive equivalent, so the
    shortest representative in [-90, 270) is returned.
    """
    angle = angle % -360 if angle < 0 else angle % 360
    if angle >= 270:
        angle -= 360
    elif angle < -90:
        angle += 360
    return angle
Because any rotation can be expressed within 360 degrees of any given number, and since negative angles are sometimes one character longer than the corresponding positive angle, we shorten the number to one in the half-open range [-90, 270).
51,608
def optimizeTransforms(element, options):
    """Attempt to optimise transform attributes on the node and its children.

    Removes a transform attribute entirely when it optimises away to the
    empty string. Returns the number of bytes saved.
    """
    saved = 0
    for transformAttr in ('transform', 'patternTransform', 'gradientTransform'):
        val = element.getAttribute(transformAttr)
        if val == '':
            continue
        transform = svg_transform_parser.parse(val)
        optimizeTransform(transform)
        newVal = serializeTransform(transform)
        if len(newVal) < len(val):
            if newVal:
                element.setAttribute(transformAttr, newVal)
            else:
                element.removeAttribute(transformAttr)
            saved += len(val) - len(newVal)
    for child in element.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            saved += optimizeTransforms(child, options)
    return saved
Attempts to optimise transform specifications on the given node and its children .
51,609
def removeComments(element):
    """Remove comment nodes from the element and its children.

    Tracks the bytes saved in the module-level counter and returns the
    number of comment nodes removed.
    """
    global _num_bytes_saved_in_comments
    if isinstance(element, xml.dom.minidom.Comment):
        _num_bytes_saved_in_comments += len(element.data)
        element.parentNode.removeChild(element)
        return 1
    removed = 0
    # Iterate over a copy: children may be removed during recursion.
    for subelement in element.childNodes[:]:
        removed += removeComments(subelement)
    return removed
Removes comments from the element and its children .
51,610
def save(self):
    """Write the current property-list representation back to its file."""
    serialized = str(self.soup)
    with open(self.filename, 'w') as plist_file:
        plist_file.write(serialized)
Save current property list representation to the original file .
51,611
def fetch_userid(self, side):
    """Return the userid for the specified bed side (None if not found)."""
    for userid, user in self.users.items():
        if user.side == side:
            return userid
Return the userid for the specified bed side .
51,612
async def start(self):
    """Start API initialization: fetch a token, then devices and users.

    Returns True on success, False when authentication failed.
    """
    _LOGGER.debug('Initializing pyEight Version: %s', __version__)
    await self.fetch_token()
    if self._token is None:
        return False
    await self.fetch_device_list()
    await self.assign_users()
    return True
Start api initialization .
51,613
async def fetch_token(self):
    """Fetch a new session token from the API and cache its fields."""
    url = '{}/login'.format(API_URL)
    payload = 'email={}&password={}'.format(self._email, self._password)
    reg = await self.api_post(url, None, payload)
    if reg is None:
        _LOGGER.error('Unable to authenticate and fetch eight token.')
        return
    session = reg['session']
    self._userid = session['userId']
    self._token = session['token']
    self._expdate = session['expirationDate']
    _LOGGER.debug('UserID: %s, Token: %s', self._userid, self.token)
Fetch new session token from api .
51,614
async def fetch_device_list(self):
    """Fetch the list of devices attached to this account."""
    url = '{}/users/me'.format(API_URL)
    dlist = await self.api_get(url)
    if dlist is None:
        _LOGGER.error('Unable to fetch eight devices.')
        return
    self._devices = dlist['user']['devices']
    _LOGGER.debug('Devices: %s', self._devices)
Fetch list of devices .
51,615
async def assign_users(self):
    """Create EightUser objects for the account owner's side of the first
    device and, when a partner is configured, for the opposite side too.
    """
    device = self._devices[0]
    url = '{}/devices/{}?filter=ownerId,leftUserId,rightUserId'.format(API_URL, device)
    data = await self.api_get(url)
    if data is None:
        _LOGGER.error('Unable to assign eight device users.')
        return
    result = data['result']
    # BUG FIX: user_side was previously left unbound when the account owned
    # neither side, crashing with UnboundLocalError in the partner branch.
    user_side = None
    if result['rightUserId'] == self._userid:
        self.users[result['rightUserId']] = EightUser(self, result['rightUserId'], 'right')
        user_side = 'right'
    elif result['leftUserId'] == self._userid:
        self.users[result['leftUserId']] = EightUser(self, result['leftUserId'], 'left')
        user_side = 'left'
    else:
        _LOGGER.error('Unable to assign eight device users.')
    if self._partner and user_side is not None:
        # Register the partner on the opposite side.
        if user_side == 'right':
            self.users[result['leftUserId']] = EightUser(self, result['leftUserId'], 'left')
        else:
            self.users[result['rightUserId']] = EightUser(self, result['rightUserId'], 'right')
Update device properties .
51,616
def room_temperature(self):
    """Return a room temperature for the bed, preferring sides whose
    session is currently processing; None when no reading is available.
    """
    processing_avg = None
    idle_avg = None
    for user in self.users.values():
        temp = user.current_values['room_temp']
        if user.current_values['processing']:
            processing_avg = temp if processing_avg is None else (processing_avg + temp) / 2
        else:
            idle_avg = temp if idle_avg is None else (idle_avg + temp) / 2
    if processing_avg is not None:
        return processing_avg
    if idle_avg is not None:
        return idle_avg
Return room temperature for both sides of bed .
51,617
def handle_device_json(self, data):
    """Prepend the latest device json, keeping the history length fixed."""
    history = self._device_json
    history.insert(0, data)
    history.pop()
Manage the device json list .
51,618
async def update_device_data(self):
    """Update the device data json, refreshing the token if it expires soon."""
    url = '{}/devices/{}?offlineView=true'.format(API_URL, self.deviceid)
    # Refresh the access token when less than an hour of validity remains.
    expiry = datetime.strptime(self._expdate, '%Y-%m-%dT%H:%M:%S.%fZ')
    exp_delta = expiry - datetime.fromtimestamp(time.time())
    if exp_delta.total_seconds() < 3600:
        _LOGGER.debug('Fetching new access token before expiration.')
        await self.fetch_token()
    device_resp = await self.api_get(url)
    if device_resp is None:
        _LOGGER.error('Unable to fetch eight device data.')
        return
    self.handle_device_json(device_resp['result'])
    for user in self.users.values():
        user.dynamic_presence()
Update device data json .
51,619
def satisfy(self, *requirements):
    """Check whether the given requirement ranges are satisfiable within
    the current execution context.

    Detected versions are cached across calls. Depending on the
    `_fail_on_missing` / `_fail_on_undetectable` flags, an undetectable
    requirement either raises or is logged and skipped. Returns the list
    of detected versions for the requirements that were checked.
    """
    versions = []
    for req_range in requirements:
        try:
            version = self._detected_versions[req_range.name]
        except KeyError:
            try:
                version = req_range.requirement.detect_version()
            except ArcanaRequirementNotFoundError as e:
                if self._fail_on_missing:
                    raise
                logger.warning(e)
                # BUG FIX: previously fell through with `version` unbound
                # (or stale from the previous iteration) and crashed/misfired
                # in the range check below.
                continue
            except ArcanaVersionNotDetectableError as e:
                if self._fail_on_undetectable:
                    raise
                logger.warning(e)
                continue  # BUG FIX: same unbound-version fall-through
            else:
                self._detected_versions[req_range.name] = version
        if not req_range.within(version):
            raise ArcanaVersionError(
                "Detected {} version {} is not within requested range {}"
                .format(req_range.requirement, version, req_range))
        versions.append(version)
    return versions
Checks whether the given requirements are satisfiable within the given execution context
51,620
def housecode_to_index(housecode):
    """Convert an X10 housecode (e.g. 'A1'..'P16') to a zero-based index.

    Raises ValueError for anything outside A1..P16 (case-insensitive).
    """
    match = re.search(r'^([A-P])(\d{1,2})$', housecode.upper())
    if match:
        unit = int(match.group(2))
        if 1 <= unit <= 16:
            house = ord(match.group(1)) - ord('A')
            return house * 16 + unit - 1
    raise ValueError("Invalid X10 housecode: %s" % housecode)
Convert a X10 housecode to a zero - based index
51,621
def index_to_housecode(index):
    """Convert a zero-based index (0..255) to an X10 housecode string."""
    if not 0 <= index <= 255:
        raise ValueError
    house, unit = divmod(index, 16)
    return '{}{:02d}'.format(chr(ord('A') + house), unit + 1)
Convert a zero - based index to a X10 housecode .
51,622
def _check_checksum ( msg ) : checksum = int ( msg [ - 2 : ] , 16 ) for char in msg [ : - 2 ] : checksum += ord ( char ) if ( checksum % 256 ) != 0 : raise ValueError ( "Elk message checksum invalid" )
Ensure checksum in message is good .
51,623
def _check_message_valid(msg):
    """Validate packet length and checksum, raising ValueError if bad."""
    try:
        declared_length = int(msg[:2], 16)
        if declared_length != len(msg) - 2:
            raise ValueError("Elk message length incorrect")
        _check_checksum(msg)
    except IndexError:
        raise ValueError("Elk message length incorrect")
Check packet length valid and that checksum is good .
51,624
def add_handler(self, message_type, handler):
    """Register a callback for a message type, avoiding duplicates."""
    handlers = self._handlers.setdefault(message_type, [])
    if handler not in handlers:
        handlers.append(handler)
Manage callbacks for message handlers .
51,625
def decode(self, msg):
    """Decode an Elk message and dispatch it to the registered handlers.

    Falls back to the 'unknown' decoder when no command-specific decoder
    method exists on this instance.
    """
    _check_message_valid(msg)
    cmd = msg[2:4]
    decoder = getattr(self, '_{}_decode'.format(cmd.lower()), None)
    if decoder is None:
        cmd = 'unknown'
        decoder = self._unknown_decode
    decoded_msg = decoder(msg)
    for handler in self._handlers.get(cmd, []):
        handler(**decoded_msg)
Decode an Elk message by passing to appropriate decoder
51,626
def get_fileset(self, fileset):
    """Cache a single fileset from XNAT, downloading it if the local cache
    is absent or stale, and return its (primary_path, aux_paths).

    Uses a '.download' temp directory as a lock so concurrent processes do
    not download the same fileset twice.
    """
    if fileset.format is None:
        raise ArcanaUsageError(
            "Attempting to download {}, which has not been assigned a "
            "file format (see Fileset.formatted)".format(fileset))
    self._check_repository(fileset)
    with self:
        # Resolve the scan on the XNAT server and record its identifiers.
        xsession = self.get_xsession(fileset)
        xscan = xsession.scans[fileset.name]
        fileset.uri = xscan.uri
        fileset.id = xscan.id
        cache_path = self._cache_path(fileset)
        need_to_download = True
        if op.exists(cache_path):
            if self._check_md5:
                # Compare saved digests against the server's to detect a
                # stale cache; a missing/unreadable digest file forces a
                # re-download.
                md5_path = cache_path + XnatRepo.MD5_SUFFIX
                try:
                    with open(md5_path, 'r') as f:
                        cached_checksums = json.load(f)
                    if cached_checksums == fileset.checksums:
                        need_to_download = False
                except IOError:
                    pass
            else:
                need_to_download = False
        if need_to_download:
            xresource = xscan.resources[fileset._resource_name]
            tmp_dir = cache_path + '.download'
            try:
                os.mkdir(tmp_dir)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # Another process is downloading: wait for it instead of
                    # racing (see _delayed_download).
                    self._delayed_download(
                        tmp_dir, xresource, xscan, fileset,
                        xsession.label, cache_path,
                        delay=self._race_cond_delay)
                else:
                    raise
            else:
                self.download_fileset(
                    tmp_dir, xresource, xscan, fileset,
                    xsession.label, cache_path)
                shutil.rmtree(tmp_dir)
        if not fileset.format.directory:
            # Split the cached files into the primary file and auxiliaries.
            (primary_path, aux_paths) = fileset.format.assort_files(
                op.join(cache_path, f) for f in os.listdir(cache_path))
        else:
            primary_path = cache_path
            aux_paths = None
    return primary_path, aux_paths
Caches a single fileset (e.g. when the path attribute is accessed and it has not been previously cached).
51,627
def get_checksums(self, fileset):
    """Download the MD5 digests for the files in the file-set.

    The primary file's digest is re-keyed under '.' so comparisons do not
    depend on the scan-specific primary file name.
    """
    if fileset.uri is None:
        raise ArcanaUsageError(
            "Can't retrieve checksums as URI has not been set for {}".format(fileset))
    with self:
        result = self._login.get_json(fileset.uri + '/files')['ResultSet']['Result']
        checksums = {r['Name']: r['digest'] for r in result}
        if not fileset.format.directory:
            primary = fileset.format.assort_files(checksums.keys())[0]
            checksums['.'] = checksums.pop(primary)
    return checksums
Downloads the MD5 digests associated with the files in the file - set . These are saved with the downloaded files in the cache and used to check if the files have been updated on the server
51,628
def convert_subject_ids(self, subject_ids):
    """Convert integer subject IDs to zero-padded 3-digit strings.

    Non-integer IDs pass through unchanged; None stays None.
    """
    if subject_ids is None:
        return None
    return set('{:03d}'.format(s) if isinstance(s, int) else s
               for s in subject_ids)
Convert subject ids to strings if they are integers
51,629
def get_xsession(self, item):
    """Return the XNAT session for the item, creating the subject and
    session records on the server when they do not already exist.
    """
    subj_label, sess_label = self._get_item_labels(item)
    with self:
        xproject = self._login.projects[self.project_id]
        try:
            xsubject = xproject.subjects[subj_label]
        except KeyError:
            xsubject = self._login.classes.SubjectData(
                label=subj_label, parent=xproject)
        try:
            xsession = xsubject.experiments[sess_label]
        except KeyError:
            xsession = self._login.classes.MrSessionData(
                label=sess_label, parent=xsubject)
            if item.derived:
                # Record which acquired session this derived session stems from.
                xsession.fields[self.DERIVED_FROM_FIELD] = \
                    self._get_item_labels(item, no_from_study=True)[1]
    return xsession
Returns the XNAT session and cache dir corresponding to the item .
51,630
def detect_version_str(self):
    """Detect the package version from MATLAB help text of the test function.

    Raises ArcanaRequirementNotFoundError when the help text is empty.
    """
    help_text = run_matlab_cmd("help('{}')".format(self.test_func))
    if not help_text:
        raise ArcanaRequirementNotFoundError(
            "Did not find test function '{}' for {}".format(self.test_func, self))
    return self.parse_help_text(help_text)
Try to detect version of package from command help text . Bit of a long shot as they are typically included
51,631
def target_heating_level(self):
    """Return the target heating level for this side, or None if unavailable.

    Returns None when the device data is not yet populated (subscripting
    None raises TypeError) or when the side is unexpected.
    """
    if self.side == 'left':
        key = 'leftTargetHeatingLevel'
    elif self.side == 'right':
        key = 'rightTargetHeatingLevel'
    else:
        return None  # BUG FIX: 'level' was unbound for an unexpected side
    try:
        return self.device.device_data[key]
    except TypeError:
        return None
Return target heating level .
51,632
def heating_level(self):
    """Return the current heating level for this side, or None if unavailable.

    Returns None when the device data is not yet populated or when the
    side is unexpected.
    """
    if self.side == 'left':
        key = 'leftHeatingLevel'
    elif self.side == 'right':
        key = 'rightHeatingLevel'
    else:
        return None  # BUG FIX: 'level' was unbound for an unexpected side
    try:
        return self.device.device_data[key]
    except TypeError:
        return None
Return heating level .
51,633
def past_heating_level(self, num):
    """Return the heating level from `num` samples ago, or 0 if unavailable.

    Only the last 10 history entries are kept, so num > 9 always yields 0.
    """
    if num > 9:
        return 0
    if self.side == 'left':
        key = 'leftHeatingLevel'
    elif self.side == 'right':
        key = 'rightHeatingLevel'
    else:
        return 0  # BUG FIX: 'level' was unbound for an unexpected side
    try:
        return self.device.device_data_history[num][key]
    except TypeError:
        return 0
Return a heating level from the past .
51,634
def now_heating(self):
    """Return whether this side is currently heating, or None if unavailable."""
    if self.side == 'left':
        key = 'leftNowHeating'
    elif self.side == 'right':
        key = 'rightNowHeating'
    else:
        return None  # BUG FIX: 'heat' was unbound for an unexpected side
    try:
        return self.device.device_data[key]
    except TypeError:
        return None
Return current heating state .
51,635
def heating_remaining(self):
    """Return seconds of heating time remaining, or None if unavailable."""
    if self.side == 'left':
        key = 'leftHeatingDuration'
    elif self.side == 'right':
        key = 'rightHeatingDuration'
    else:
        return None  # BUG FIX: 'timerem' was unbound for an unexpected side
    try:
        return self.device.device_data[key]
    except TypeError:
        return None
Return seconds of heat time remaining .
51,636
def last_seen(self):
    """Return the mattress last-seen time as 'YYYY-MM-DDTHH:MM:SS'
    (local time), or None if unavailable.
    """
    if self.side == 'left':
        key = 'leftPresenceEnd'
    elif self.side == 'right':
        key = 'rightPresenceEnd'
    else:
        return None  # BUG FIX: 'lastseen' was unbound for an unexpected side
    try:
        end_timestamp = self.device.device_data[key]
        return datetime.fromtimestamp(int(end_timestamp)).strftime('%Y-%m-%dT%H:%M:%S')
    except TypeError:
        return None
Return mattress last seen time .
51,637
def heating_values(self):
    """Return a dict snapshot of all the current heating values."""
    return {
        'level': self.heating_level,
        'target': self.target_heating_level,
        'active': self.now_heating,
        'remaining': self.heating_remaining,
        'last_seen': self.last_seen,
    }
Return a dict of all the current heating values .
51,638
def current_sleep_stage(self):
    """Return the sleep stage for the in-progress session (None if unknown).

    While a session is still processing the final stage entry lags, so the
    second-to-last entry is used in that case.
    """
    try:
        stages = self.intervals[0]['stages']
        if not stages:
            return None
        offset = 2 if self.current_session_processing else 1
        return stages[len(stages) - offset]['stage']
    except KeyError:
        return None
Return sleep stage for in - progress session .
51,639
def current_sleep_breakdown(self):
    """Return total duration per sleep stage for the in-progress session,
    or None when the data is missing/malformed.
    """
    try:
        stages = self.intervals[0]['stages']
        breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0}
        for entry in stages:
            name = entry['stage']
            if name in breakdown:
                breakdown[name] += entry['duration']
    except KeyError:
        breakdown = None
    return breakdown
Return durations of sleep stages for in - progress session .
51,640
def current_bed_temp(self):
    """Return the latest bed temperature for the in-progress session."""
    try:
        readings = self.intervals[0]['timeseries']['tempBedC']
        if not readings:
            return None
        return readings[-1][1]
    except KeyError:
        return None
Return current bed temperature for in - progress session .
51,641
def current_room_temp(self):
    """Return the latest room temperature for the in-progress session."""
    try:
        readings = self.intervals[0]['timeseries']['tempRoomC']
        if not readings:
            return None
        return readings[-1][1]
    except KeyError:
        return None
Return current room temperature for in - progress session .
51,642
def current_resp_rate(self):
    """Return the latest respiratory rate for the in-progress session."""
    try:
        readings = self.intervals[0]['timeseries']['respiratoryRate']
        if not readings:
            return None
        return readings[-1][1]
    except KeyError:
        return None
Return current respiratory rate for in - progress session .
51,643
def current_heart_rate(self):
    """Return the latest heart rate for the in-progress session."""
    try:
        readings = self.intervals[0]['timeseries']['heartRate']
        if not readings:
            return None
        return readings[-1][1]
    except KeyError:
        return None
Return current heart rate for in - progress session .
51,644
def current_values(self):
    """Return a dict snapshot of all in-progress session parameters."""
    return {
        'date': self.current_session_date,
        'score': self.current_sleep_score,
        'stage': self.current_sleep_stage,
        'breakdown': self.current_sleep_breakdown,
        'tnt': self.current_tnt,
        'bed_temp': self.current_bed_temp,
        'room_temp': self.current_room_temp,
        'resp_rate': self.current_resp_rate,
        'heart_rate': self.current_heart_rate,
        'processing': self.current_session_processing,
    }
Return a dict of all the current parameters .
51,645
def last_sleep_breakdown(self):
    """Return total duration per sleep stage for the last complete session."""
    try:
        stages = self.intervals[1]['stages']
    except KeyError:
        return None
    breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0}
    for entry in stages:
        name = entry['stage']
        if name in breakdown:
            breakdown[name] += entry['duration']
    return breakdown
Return durations of sleep stages for last complete session .
51,646
def last_bed_temp(self):
    """Return the average bed temperature for the last complete session."""
    try:
        readings = self.intervals[1]['timeseries']['tempBedC']
    except KeyError:
        return None
    if not readings:
        return None
    return sum(r[1] for r in readings) / len(readings)
Return avg bed temperature for last session .
51,647
def last_room_temp(self):
    """Return the average room temperature for the last complete session."""
    try:
        readings = self.intervals[1]['timeseries']['tempRoomC']
    except KeyError:
        return None
    if not readings:
        return None
    return sum(r[1] for r in readings) / len(readings)
Return avg room temperature for last session .
51,648
def last_heart_rate(self):
    """Return the average heart rate for the last complete session."""
    try:
        readings = self.intervals[1]['timeseries']['heartRate']
    except KeyError:
        return None
    if not readings:
        return None
    return sum(r[1] for r in readings) / len(readings)
Return avg heart rate for last session .
51,649
def last_values(self):
    """Return a dict snapshot of all last-session parameters."""
    return {
        'date': self.last_session_date,
        'score': self.last_sleep_score,
        'breakdown': self.last_sleep_breakdown,
        'tnt': self.last_tnt,
        'bed_temp': self.last_bed_temp,
        'room_temp': self.last_room_temp,
        'resp_rate': self.last_resp_rate,
        'heart_rate': self.last_heart_rate,
        'processing': self.last_session_processing,
    }
Return a dict of all the last parameters .
51,650
def heating_stats(self):
    """Calculate and log mean/stdev/variance of recent heating levels.

    Bails out early when any sampled level is 0 (history not populated yet).
    """
    local_5 = []
    local_10 = []
    for i in range(0, 10):
        level = self.past_heating_level(i)
        if level == 0:
            _LOGGER.debug('Cant calculate stats yet...')
            return
        if i < 5:
            local_5.append(level)
        local_10.append(level)
    _LOGGER.debug('%s Heating History: %s', self.side, local_10)
    try:
        fiveminavg = statistics.mean(local_5)
        tenminavg = statistics.mean(local_10)
        _LOGGER.debug('%s Heating 5 min avg: %s', self.side, fiveminavg)
        _LOGGER.debug('%s Heating 10 min avg: %s', self.side, tenminavg)
        fivestdev = statistics.stdev(local_5)
        tenstdev = statistics.stdev(local_10)
        _LOGGER.debug('%s Heating 5 min stdev: %s', self.side, fivestdev)
        _LOGGER.debug('%s Heating 10 min stdev: %s', self.side, tenstdev)
        fivevar = statistics.variance(local_5)
        tenvar = statistics.variance(local_10)
        _LOGGER.debug('%s Heating 5 min variance: %s', self.side, fivevar)
        _LOGGER.debug('%s Heating 10 min variance: %s', self.side, tenvar)
    except statistics.StatisticsError:
        # BUG FIX: was a bare `except:` which swallowed every exception
        # (including KeyboardInterrupt); only insufficient-data errors from
        # the statistics module are expected here.
        _LOGGER.debug('Cant calculate stats yet...')
Calculate some heating data stats .
51,651
def dynamic_presence(self):
    """Determine presence based on bed heating level and end presence time
    reported by the api.

    Heuristic only: high absolute levels, or a sustained rise (fall) across
    recent samples, toggle presence on (off). Thresholds (50/25/15, deltas
    of 8 and 2) are empirical tuning values.
    """
    if not self.presence:
        if self.heating_level > 50:
            # Hot bed: body heat if not actively heating, or heating level
            # overshooting the target by a wide margin.
            if not self.now_heating:
                self.presence = True
            elif self.heating_level - self.target_heating_level >= 8:
                self.presence = True
        elif self.heating_level > 25:
            # Medium level: require a steady rise (>= 2 per sample) over the
            # last three samples before applying the same two checks.
            if self.past_heating_level(0) - self.past_heating_level(1) >= 2 and self.past_heating_level(1) - self.past_heating_level(2) >= 2 and self.past_heating_level(2) - self.past_heating_level(3) >= 2:
                if not self.now_heating:
                    self.presence = True
                elif self.heating_level - self.target_heating_level >= 8:
                    self.presence = True
    elif self.presence:
        if self.heating_level <= 15:
            # Cold bed: definitely vacated.
            self.presence = False
        elif self.heating_level < 50:
            # Monotonic fall over the last three samples: likely vacated.
            if self.past_heating_level(0) - self.past_heating_level(1) < 0 and self.past_heating_level(1) - self.past_heating_level(2) < 0 and self.past_heating_level(2) - self.past_heating_level(3) < 0:
                self.presence = False
    _LOGGER.debug('%s Presence Results: %s', self.side, self.presence)
Determine presence based on bed heating level and end presence time reported by the api .
51,652
async def set_heating_level(self, level, duration=0):
    """Set the target heating level (clamped to 10..100) and duration,
    then fold the API response back into the device json.
    """
    url = '{}/devices/{}'.format(API_URL, self.device.deviceid)
    level = min(100, max(10, level))
    if self.side == 'left':
        data = {'leftHeatingDuration': duration, 'leftTargetHeatingLevel': level}
    elif self.side == 'right':
        data = {'rightHeatingDuration': duration, 'rightTargetHeatingLevel': level}
    set_heat = await self.device.api_put(url, data)
    if set_heat is None:
        _LOGGER.error('Unable to set eight heating level.')
    else:
        self.device.handle_device_json(set_heat['device'])
Update heating data json .
51,653
async def update_trend_data(self, startdate, enddate):
    """Update the trends data json for the specified time period."""
    url = '{}/users/{}/trends'.format(API_URL, self.userid)
    params = {'tz': self.device.tzone, 'from': startdate, 'to': enddate}
    trends = await self.device.api_get(url, params)
    if trends is None:
        _LOGGER.error('Unable to fetch eight trend data.')
        return
    self.trends = trends['days']
Update trends data json for specified time period .
51,654
async def update_intervals_data(self):
    """Update the intervals data json for this user."""
    url = '{}/users/{}/intervals'.format(API_URL, self.userid)
    intervals = await self.device.api_get(url)
    if intervals is None:
        _LOGGER.error('Unable to fetch eight intervals data.')
        return
    self.intervals = intervals['intervals']
Update intervals data json for specified time period .
51,655
def save(obj=None, m2m_data=None):
    """Save a deserialized model object (raw), then add any m2m relations."""
    obj.save_base(raw=True)
    for attr, values in (m2m_data or {}).items():
        related_manager = getattr(obj, attr)
        for value in values:
            related_manager.add(value)
Saves a deserialized model object .
51,656
def deserialize_transactions(self, transactions=None, deserialize_only=None):
    """Deserialize the encrypted serialized model instances (``tx``) in a
    queryset of transactions, applying and marking them consumed unless
    ``deserialize_only`` is set.
    """
    # Refuse to replay transactions this host produced itself.
    if (not self.allow_self
            and transactions.filter(producer=socket.gethostname()).exists()):
        raise TransactionDeserializerError(
            f"Not deserializing own transactions. Got "
            f"allow_self=False, hostname={socket.gethostname()}")
    for transaction in transactions:
        json_text = self.aes_decrypt(cipher_text=transaction.tx)
        json_text = self.custom_parser(json_text)
        deserialized = next(self.deserialize(json_text=json_text))
        if deserialize_only:
            continue
        if transaction.action == DELETE:
            deserialized.object.delete()
        else:
            self.save(obj=deserialized.object, m2m_data=deserialized.m2m_data)
        transaction.is_consumed = True
        transaction.save()
Deserializes the encrypted serialized model instances tx in a queryset of transactions .
51,657
def custom_parser(self, json_text=None):
    """Run json_text through the custom parsers registered on the app config."""
    app_config = django_apps.get_app_config("django_collect_offline")
    for json_parser in app_config.custom_json_parsers:
        json_text = json_parser(json_text)
    return json_text
Runs json_text thru custom parsers .
51,658
def cli(self):
    """cli lazy property: a connected paramiko SSHClient with keepalive."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=self.private_hostname,
                   username=self.username,
                   key_filename=self.key_filename,
                   timeout=self.timeout,
                   look_for_keys=self.look_for_keys)
    client.get_transport().set_keepalive(3)
    return client
cli lazy property
51,659
def channel(self):
    """channel lazy property: open an interactive shell and wait (up to
    ~10s) for the user's prompt to appear before returning the channel.
    """
    chan = self.cli.invoke_shell(width=360, height=80)
    chan.setblocking(0)
    chan.settimeout(10)
    output = ""
    for _attempt in range(10):
        try:
            output += chan.recv(16384).decode()
        except socket.timeout:
            pass
        if output.find('%s@' % self.username) != -1:
            return chan
        time.sleep(1)
    raise StitchesConnectionException("Failed to get shell prompt")
channel lazy property
51,660
def pbm(self):
    """Plumbum lazy property: an SshMachine, or None when rpyc is disabled."""
    if self.disable_rpyc:
        return None
    from plumbum import SshMachine
    return SshMachine(host=self.private_hostname,
                      user=self.username,
                      keyfile=self.key_filename,
                      ssh_opts=["-o", "UserKnownHostsFile=/dev/null",
                                "-o", "StrictHostKeyChecking=no"])
Plumbum lazy property
51,661
def rpyc(self):
    """RPyC lazy property: bootstrap an rpyc classic server on the remote
    host (by copying the local rpyc package over sftp) and connect to it.

    Returns None when rpyc is disabled or any step of the setup fails.
    """
    if not self.disable_rpyc:
        try:
            import rpyc
            devnull_fd = open("/dev/null", "w")
            rpyc_dirname = os.path.dirname(rpyc.__file__)
            # Random id used to namespace the uploaded tarball/pid files.
            rnd_id = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
            pid_filename = "/tmp/%s.pid" % rnd_id
            pid_dest_filename = "/tmp/%s%s.pid" % (rnd_id, rnd_id)
            rnd_filename = "/tmp/" + rnd_id + ".tar.gz"
            rnd_dest_filename = "/tmp/" + rnd_id + rnd_id + ".tar.gz"
            # Pack the local rpyc package, renaming its directory to rnd_id.
            subprocess.check_call(["tar", "-cz", "--exclude", "*.pyc", "--exclude", "*.pyo", "--transform", "s,%s,%s," % (rpyc_dirname[1:][:-5], rnd_id), rpyc_dirname, "-f", rnd_filename], stdout=devnull_fd, stderr=devnull_fd)
            devnull_fd.close()
            self.sftp.put(rnd_filename, rnd_dest_filename)
            os.remove(rnd_filename)
            self.recv_exit_status("tar -zxvf %s -C /tmp" % rnd_dest_filename, 10)
            # NOTE(review): `r` is undefined here -- this looks like a raw
            # string literal (the remote server bootstrap script) that was
            # lost in transit; restore the original server_script source.
            server_script = r + pid_filename + r
            command = "echo \"%s\" | PYTHONPATH=\"/tmp/%s\" python " % (server_script, rnd_id)
            self.stdin_rpyc, self.stdout_rpyc, self.stderr_rpyc = self.exec_command(command, get_pty=True)
            # Wait (max 10s) for the remote server to write its pid/port file.
            self.recv_exit_status("while [ ! -f %s ]; do sleep 1; done" % (pid_filename), 10)
            self.sftp.get(pid_filename, pid_dest_filename)
            pid_fd = open(pid_dest_filename, 'r')
            port = int(pid_fd.read())
            pid_fd.close()
            os.remove(pid_dest_filename)
            return rpyc.classic.ssh_connect(self.pbm, port)
        except Exception as err:
            self.logger.debug("Failed to setup rpyc: %s" % err)
            return None
    else:
        return None
RPyC lazy property
51,662
def exec_command(self, command, bufsize=-1, get_pty=False):
    """Execute a command on the connection, remembering it as last_command.

    Returns the (stdin, stdout, stderr) triple from the underlying client.
    """
    self.last_command = command
    return self.cli.exec_command(command, bufsize, get_pty=get_pty)
Execute a command in the connection
51,663
def recv_exit_status(self, command, timeout=10, get_pty=False):
    """Execute a command and poll (up to `timeout` seconds) for its exit
    status, capturing stdout/stderr. Returns None on timeout.
    """
    status = None
    self.last_command = command
    stdin, stdout, stderr = self.cli.exec_command(command, get_pty=get_pty)
    if stdin and stdout and stderr:
        for _ in range(timeout):
            if stdout.channel.exit_status_ready():
                status = stdout.channel.recv_exit_status()
                break
            time.sleep(1)
        self.last_stdout = stdout.read()
        self.last_stderr = stderr.read()
        stdin.close()
        stdout.close()
        stderr.close()
    return status
Execute a command and get its return value
51,664
def load(cls, pipeline_name, frequency, subject_id, visit_id, from_study, path):
    """Load a saved provenance Record from the JSON file at `path`."""
    with open(path) as prov_file:
        prov = json.load(prov_file)
    return Record(pipeline_name, frequency, subject_id, visit_id,
                  from_study, prov)
Loads a saved provenance object from a JSON file
51,665
def mismatches(self, other, include=None, exclude=None):
    """Diff this record's provenance against *other*'s via DeepDiff
    (order-insensitive), keeping only changes whose paths match *include*
    (when given) and match no *exclude* pattern (when given).
    """
    include_res = exclude_res = None
    if include is not None:
        include_res = [self._gen_prov_path_regex(p) for p in include]
    if exclude is not None:
        exclude_res = [self._gen_prov_path_regex(p) for p in exclude]

    def keep(path):
        # Kept iff it matches an include pattern (or include is unset)
        # and matches no exclude pattern.
        if include_res is not None and not any(rx.match(path)
                                               for rx in include_res):
            return False
        if exclude_res is not None and any(rx.match(path)
                                           for rx in exclude_res):
            return False
        return True

    raw_diff = DeepDiff(self._prov, other._prov, ignore_order=True)
    filtered_diff = {}
    for change_type, changes in raw_diff.items():
        if isinstance(changes, dict):
            kept = {k: v for k, v in changes.items() if keep(k)}
        else:
            kept = [c for c in changes if keep(c)]
        if kept:
            filtered_diff[change_type] = kept
    return filtered_diff
Compares information stored within provenance objects with the exception of version information to see if they match . Matches are constrained to the paths passed to the include kwarg with the exception of sub - paths passed to the exclude kwarg
51,666
def translate(cls, substudy_name, pipeline_getter, auto_added=False):
    """Build a pipeline-constructor method that forwards to the named
    sub-study's *pipeline_getter*, remapping inputs/outputs through the
    sub-study spec's name map and prefixing with the sub-study name.
    """
    assert isinstance(substudy_name, basestring)
    assert isinstance(pipeline_getter, basestring)

    def translated_getter(self, **name_maps):
        spec = self.substudy_spec(substudy_name)
        constructor = getattr(self.substudy(substudy_name), pipeline_getter)
        return constructor(prefix=substudy_name + '_',
                           input_map=spec.name_map,
                           output_map=spec.name_map,
                           study=self,
                           name_maps=name_maps)

    translated_getter.auto_added = auto_added
    return translated_getter
A method for translating pipeline constructors from a sub - study to the namespace of a multi - study . Returns a new method that calls the sub - study pipeline constructor with appropriate keyword arguments
51,667
def auto_data_specs(self):
    """Yield the sub-study class's data specs that are absent from the
    explicit name map.
    """
    explicitly_mapped = self._name_map
    for spec in self.study_class.data_specs():
        if spec.name not in explicitly_mapped:
            yield spec
Data specs in the sub - study class that are not explicitly provided in the name map
51,668
def auto_param_specs(self):
    """Yield the sub-study class's parameter specs that are absent from
    the explicit name map.
    """
    explicitly_mapped = self._name_map
    for spec in self.study_class.parameter_specs():
        if spec.name not in explicitly_mapped:
            yield spec
Parameter specs in the sub-study class that are not explicitly provided in the name map
51,669
def _make_nodes(self, cwd=None):
    """Yield nodes from NipypeMapNode._make_nodes recast as Arcana nodes,
    copying across this node's environment/version/wall-time/annotations.
    """
    for idx, generated in NipypeMapNode._make_nodes(self, cwd=cwd):
        generated.__class__ = self.node_cls
        generated._environment = self._environment
        generated._versions = self._versions
        generated._wall_time = self._wall_time
        generated._annotations = self._annotations
        yield idx, generated
Cast generated nodes to be Arcana nodes
51,670
def get_query(query_name):
    """Locate the query file whose stem equals *query_name*, parse its
    frontmatter and return a SimpleNamespace describing it (None when no
    file matches).
    """
    matches = [q for q in FLAT_QUERIES if query_name == q.stem]
    if not matches:
        return None
    query_file = matches[0]
    with open(query_file) as fh:
        metadata, query_body = frontmatter.parse(fh.read())
    # The file suffix selects the resultset handler module.
    result_mod = query_file.suffix.strip('.')
    return SimpleNamespace(
        name=query_name,
        metadata=metadata,
        path=query_file,
        result_mod=result_mod,
        body=query_body,
        error=False,
        executed=datetime.utcnow().isoformat(),
    )
Find file matching query_name read and return query object
51,671
def get_result_set(query_name, **kwargs):
    """Fetch the query via get_query, render its template with *kwargs*,
    run it through the matching resultset module and attach the JSON-safe
    result (plus any reported error) to the query object.
    """
    query = get_query(query_name)
    if not query:
        missing = SimpleNamespace()
        missing.error = f"No query found matching '{query_name}'"
        return missing
    try:
        result_mod = import_module(
            f'nerium.contrib.resultset.{query.result_mod}')
    except ModuleNotFoundError:
        # Fall back to the built-in SQL resultset handler.
        result_mod = import_module('nerium.resultset.sql')
    query.params = {**kwargs}
    query.body = process_template(sql=query.body, **query.params)
    raw_result = result_mod.result(query, **query.params)
    # Round-trip through JSON to normalise non-serialisable values.
    query.result = json.loads(
        json.dumps(raw_result, default=serialize_objects_handler))
    try:
        if 'error' in query.result[0].keys():
            query.error = query.result[0]['error']
    except IndexError:
        pass
    return query
Call get_query then submit query from file to resultset module
51,672
def results_to_csv(query_name, **kwargs):
    """Run the named query and render its result rows as a CSV string.

    Headers come from the first row's keys; assumes at least one row.
    """
    query = get_result_set(query_name, **kwargs)
    rows = query.result
    dataset = tablib.Dataset()
    dataset.headers = list(rows[0].keys())
    for row in rows:
        dataset.append(tuple(row.values()))
    return dataset.export('csv')
Generate CSV from result data
51,673
def register(self, models=None, wrapper_cls=None):
    """Register each 'app_label.modelname' with *wrapper_cls* (defaulting
    to ``self.wrapper_cls``), optionally adding the historical model too.

    Raises AlreadyRegistered when a model is registered twice.
    """
    self.loaded = True
    for model in models:
        key = model.lower()
        if key in self.registry:
            raise AlreadyRegistered(f"Model is already registered. Got {key}.")
        chosen_cls = wrapper_cls or self.wrapper_cls
        self.registry[key] = chosen_cls
        if self.register_historical:
            # app.model -> app.historicalmodel
            historical_key = ".historical".join(key.split("."))
            self.registry[historical_key] = chosen_cls
Registers models, keyed as 'app_label.modelname', with wrapper_cls.
51,674
def register_for_app(self, app_label=None, exclude_models=None,
                     exclude_model_classes=None):
    """Register every model of *app_label*, skipping labels listed in
    *exclude_models* and subclasses of *exclude_model_classes*.
    """
    exclude_models = exclude_models or []
    app_config = django_apps.get_app_config(app_label)
    labels = []
    for model in app_config.get_models():
        label = model._meta.label_lower
        if label in exclude_models:
            continue
        if exclude_model_classes and issubclass(model, exclude_model_classes):
            continue
        labels.append(label)
    self.register(labels)
Registers all models for this app_label .
51,675
def get_wrapped_instance(self, instance=None):
    """Return *instance* wrapped by its registered wrapper class.

    Raises ModelNotRegistered for unregistered models; returns the bare
    instance when no wrapper class is configured.
    """
    label = instance._meta.label_lower
    if label not in self.registry:
        raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.")
    wrapper_cls = self.registry.get(label) or self.wrapper_cls
    if not wrapper_cls:
        return instance
    return wrapper_cls(instance)
Returns a wrapped model instance .
51,676
def site_models(self, app_label=None):
    """Return {app name: registered models sorted by verbose_name} for a
    single app, or for every installed app when *app_label* is None.
    """
    if app_label is None:
        app_configs = django_apps.get_app_configs()
    else:
        app_configs = [django_apps.get_app_config(app_label)]
    collected = {}
    for app_config in app_configs:
        registered = [m for m in app_config.get_models()
                      if m._meta.label_lower in self.registry]
        if registered:
            registered.sort(key=lambda m: m._meta.verbose_name)
            collected[app_config.name] = registered
    return collected
Returns a dictionary of registered models .
51,677
def get_fileset(self, fileset):
    """Resolve the on-disk primary path and aux-file paths for *fileset*.

    When the fileset already carries a path, that is used verbatim;
    otherwise the repository path is derived and checked for existence.

    Raises ArcanaMissingDataException when the primary file or any side
    car is absent from this repository.
    """
    if fileset._path is not None:
        return fileset.path, fileset.aux_files
    primary_path = self.fileset_path(fileset)
    aux_files = fileset.format.default_aux_file_paths(primary_path)
    if not op.exists(primary_path):
        raise ArcanaMissingDataException(
            "{} does not exist in {}".format(fileset, self))
    for aux_name, aux_path in aux_files.items():
        if not op.exists(aux_path):
            raise ArcanaMissingDataException(
                "{} is missing '{}' side car in {}".format(
                    fileset, aux_name, self))
    return primary_path, aux_files
Set the path of the fileset from the repository
51,678
def get_field(self, field):
    """Read the value of *field* back from this repository's fields JSON
    file, coercing it with the field's dtype (element-wise for arrays).

    Raises ArcanaMissingDataException when the file or key is absent.
    """
    fpath = self.fields_json_path(field)
    try:
        # The lock guards against concurrent writers of the shared JSON file
        # (cf. put_field, which takes the same lock).
        with InterProcessLock(fpath + self.LOCK_SUFFIX,
                              logger=logger), open(fpath, 'r') as f:
            dct = json.load(f)
        val = dct[field.name]
        if field.array:
            val = [field.dtype(v) for v in val]
        else:
            val = field.dtype(val)
    except (KeyError, IOError) as e:
        try:
            # Re-raise IOErrors other than "file not found"; KeyError has
            # no ``errno`` attribute, hence the AttributeError guard.
            if e.errno != errno.ENOENT:
                raise
        except AttributeError:
            pass
        raise ArcanaMissingDataException(
            "{} does not exist in the local repository {}".format(
                field.name, self))
    return val
Update the value of the field from the repository
51,679
def put_fileset(self, fileset):
    """Insert or update *fileset* (a file or a directory) in the
    repository by copying it to its canonical repository path.
    """
    target_path = self.fileset_path(fileset)
    if op.isfile(fileset.path):
        shutil.copyfile(fileset.path, target_path)
        # Copy side-car files alongside the primary file.
        # NOTE(review): ``self.aux_file`` looks wrong — side cars belong to
        # the fileset (cf. ``fileset.aux_files`` in get_fileset) and the
        # repository is unlikely to define ``aux_file``.  Confirm against
        # the Fileset/Repository APIs before relying on this path.
        for aux_name, aux_path in fileset.format.default_aux_file_paths(
                target_path).items():
            shutil.copyfile(self.aux_file[aux_name], aux_path)
    elif op.isdir(fileset.path):
        # Replace any existing directory wholesale.
        if op.exists(target_path):
            shutil.rmtree(target_path)
        shutil.copytree(fileset.path, target_path)
    else:
        # Path is neither file nor directory — should be unreachable.
        assert False
Inserts or updates a fileset in the repository
51,680
def put_field(self, field):
    """Insert or update *field* in the repository's fields JSON file,
    holding an inter-process lock for the read-modify-write cycle.
    """
    fpath = self.fields_json_path(field)
    with InterProcessLock(fpath + self.LOCK_SUFFIX, logger=logger):
        try:
            with open(fpath, 'r') as f:
                dct = json.load(f)
        except IOError as e:
            # A missing file just means no fields have been stored yet.
            if e.errno != errno.ENOENT:
                raise
            dct = {}
        dct[field.name] = list(field.value) if field.array else field.value
        with open(fpath, 'w') as f:
            json.dump(dct, f, indent=2)
Inserts or updates a field in the repository
51,681
def bind(self, study, **kwargs):
    """Check this collection's IDs against *study*'s tree.

    Used to duck-type Collection objects with Spec and Match in source and
    sink initiation.

    Raises ArcanaUsageError on any subject/visit/session ID mismatch.
    """
    if self.frequency == 'per_subject':
        tree_subject_ids = list(study.tree.subject_ids)
        subject_ids = list(self._collection.keys())
        # NOTE(review): this equality is order-sensitive; the same IDs in
        # a different order are reported as a mismatch.
        if tree_subject_ids != subject_ids:
            raise ArcanaUsageError(
                "Subject IDs in collection provided to '{}' ('{}') "
                "do not match Study tree ('{}')".format(
                    self.name, "', '".join(subject_ids),
                    "', '".join(tree_subject_ids)))
    elif self.frequency == 'per_visit':
        tree_visit_ids = list(study.tree.visit_ids)
        visit_ids = list(self._collection.keys())
        if tree_visit_ids != visit_ids:
            # Fixed: this branch compares visit IDs, but the message
            # previously said "Subject IDs".
            raise ArcanaUsageError(
                "Visit IDs in collection provided to '{}' ('{}') "
                "do not match Study tree ('{}')".format(
                    self.name, "', '".join(visit_ids),
                    "', '".join(tree_visit_ids)))
    elif self.frequency == 'per_session':
        for subject in study.tree.subjects:
            if subject.id not in self._collection:
                raise ArcanaUsageError(
                    "Study subject ID '{}' was not found in collection "
                    "provided to '{}' (found '{}')".format(
                        subject.id, self.name,
                        "', '".join(self._collection.keys())))
            for session in subject.sessions:
                if session.visit_id not in self._collection[subject.id]:
                    # Fixed: the format call was missing the visit-id
                    # argument (4 placeholders, 3 args -> IndexError).
                    raise ArcanaUsageError(
                        "Study visit ID '{}' for subject '{}' was not "
                        "found in collection provided to '{}' (found '{}')"
                        .format(
                            session.visit_id, subject.id, self.name,
                            "', '".join(
                                self._collection[subject.id].keys())))
Used for duck typing Collection objects with Spec and Match in source and sink initiation . Checks IDs match sessions in study .
51,682
def fileset(self, name, from_study=None, format=None):
    """Return the fileset named *name*, optionally produced by the study
    *from_study*.

    *name* may also be a spec, in which case the study is taken from the
    spec iff it is derived.  When multiple formats are stored, *format*
    (an extension string or a file-format object) selects one.

    Raises ArcanaNameError when no matching fileset (or format) exists.
    """
    if isinstance(name, BaseFileset):
        if from_study is None and name.derived:
            from_study = name.study.name
        name = name.name
    try:
        format_dct = self._filesets[(name, from_study)]
    except KeyError:
        available = [
            ('{}(format={})'.format(f.name, f._resource_name)
             if f._resource_name is not None else f.name)
            for f in self.filesets if f.from_study == from_study]
        other_studies = [
            (f.from_study if f.from_study is not None else '<root>')
            for f in self.filesets if f.name == name]
        if other_studies:
            msg = (". NB: matching fileset(s) found for '{}' study(ies) "
                   "('{}')".format(name, "', '".join(other_studies)))
        else:
            msg = ''
        raise ArcanaNameError(
            name,
            ("{} doesn't have a fileset named '{}'{} "
             "(available '{}'){}".format(
                 self, name,
                 (" from study '{}'".format(from_study)
                  if from_study is not None else ''),
                 "', '".join(available), msg)))
    if format is None:
        all_formats = list(format_dct.values())
        if len(all_formats) > 1:
            # Fixed: ArcanaNameError was called with the message only,
            # missing the name argument the other call sites pass.
            raise ArcanaNameError(
                name,
                "Multiple filesets found for '{}'{} in {} with formats"
                " {}. Need to specify a format".format(
                    name,
                    ("in '{}'".format(from_study)
                     if from_study is not None else ''),
                    self, "', '".join(format_dct.keys())))
        fileset = all_formats[0]
    else:
        try:
            if isinstance(format, str):
                # Format given as an extension string.
                fileset = format_dct[format]
            else:
                try:
                    fileset = format_dct[format.ext]
                except KeyError:
                    # Fall back to matching the repository resource names.
                    fileset = None
                    for rname, rfileset in format_dct.items():
                        if rname in format.resource_names(
                                self.tree.repository.type):
                            fileset = rfileset
                            break
                    if fileset is None:
                        raise
        except KeyError:
            # Fixed: the message previously interpolated an undefined
            # ``msg`` variable, raising NameError instead of the intended
            # ArcanaNameError.
            raise ArcanaNameError(
                format,
                ("{} doesn't have a fileset named '{}'{} with "
                 "format '{}' (available '{}')".format(
                     self, name,
                     (" from study '{}'".format(from_study)
                      if from_study is not None else ''),
                     format, "', '".join(format_dct.keys()))))
    return fileset
Gets the fileset named name produced by the Study named study if provided . If a spec is passed instead of a str to the name argument then the study will be set from the spec iff it is derived
51,683
def field(self, name, from_study=None):
    """Return the field named *name*, optionally produced by the study
    *from_study*.

    *name* may also be a spec, in which case the study is taken from the
    spec iff it is derived.

    Raises ArcanaNameError when no matching field exists.
    """
    if isinstance(name, BaseField):
        if from_study is None and name.derived:
            from_study = name.study.name
        name = name.name
    try:
        return self._fields[(name, from_study)]
    except KeyError:
        available = [d.name for d in self.fields
                     if d.from_study == from_study]
        other_studies = [(d.from_study if d.from_study is not None
                          else '<root>')
                         for d in self.fields if d.name == name]
        if other_studies:
            msg = (". NB: matching field(s) found for '{}' study(ies) "
                   "('{}')".format(name, "', '".join(other_studies)))
        else:
            msg = ''
        # Fixed: ``msg`` was built but never shown — the format string
        # lacked its trailing placeholder (cf. the fileset equivalent).
        raise ArcanaNameError(
            name,
            ("{} doesn't have a field named '{}'{} "
             "(available '{}'){}".format(
                 self, name,
                 (" from study '{}'".format(from_study)
                  if from_study is not None else ''),
                 "', '".join(available), msg)))
Gets the field named name produced by the Study named study if provided . If a spec is passed instead of a str to the name argument then the study will be set from the spec iff it is derived
51,684
def record(self, pipeline_name, from_study):
    """Return the provenance record for *pipeline_name* generated by
    *from_study*, raising ArcanaNameError (listing what exists) otherwise.
    """
    key = (pipeline_name, from_study)
    try:
        return self._records[key]
    except KeyError:
        found = []
        grouped = groupby(sorted(self._records, key=itemgetter(1)),
                          key=itemgetter(1))
        for sname, pnames in grouped:
            found.append("'{}' for '{}'".format(
                "', '".join(p for p, _ in pnames), sname))
        raise ArcanaNameError(
            key,
            ("{} doesn't have a provenance record for pipeline '{}' "
             "for '{}' study (found {})".format(
                 self, pipeline_name, from_study, '; '.join(found))))
Returns the provenance record for a given pipeline
51,685
def find_mismatch(self, other, indent=''):
    """Return a human-readable description of where *self* and *other*
    differ (summary filesets and fields); used in debugging unittests.
    """
    mismatch = ''
    if self != other:
        mismatch = "\n{}{}".format(indent, type(self).__name__)
    sub_indent = indent + ' '
    my_filesets = list(self.filesets)
    their_filesets = list(other.filesets)
    if len(my_filesets) != len(their_filesets):
        mismatch += ('\n{indent}mismatching summary fileset lengths '
                     '(self={} vs other={}): '
                     '\n{indent} self={}\n{indent} other={}'.format(
                         len(my_filesets), len(their_filesets),
                         my_filesets, their_filesets,
                         indent=sub_indent))
    else:
        for mine, theirs in zip(my_filesets, their_filesets):
            mismatch += mine.find_mismatch(theirs, indent=sub_indent)
    my_fields = list(self.fields)
    their_fields = list(other.fields)
    if len(my_fields) != len(their_fields):
        mismatch += ('\n{indent}mismatching summary field lengths '
                     '(self={} vs other={}): '
                     '\n{indent} self={}\n{indent} other={}'.format(
                         len(my_fields), len(their_fields),
                         my_fields, their_fields,
                         indent=sub_indent))
    else:
        for mine, theirs in zip(my_fields, their_fields):
            mismatch += mine.find_mismatch(theirs, indent=sub_indent)
    return mismatch
Highlights where two nodes differ in a human - readable form
51,686
def nodes(self, frequency=None):
    """Iterate over tree nodes of *frequency*, or over every node (all
    four frequencies, study-first) when *frequency* is None.
    """
    if frequency is not None:
        return self._nodes(frequency=frequency)
    all_frequencies = ('per_study', 'per_subject', 'per_visit',
                       'per_session')
    return chain(*(self._nodes(f) for f in all_frequencies))
Returns an iterator over all nodes in the tree for the specified frequency . If no frequency is specified then all nodes are returned
51,687
def find_mismatch(self, other, indent=''):
    """Extend the base mismatch report with subject and visit comparisons;
    used in debugging unittests.
    """
    mismatch = super(Tree, self).find_mismatch(other, indent)
    sub_indent = indent + ' '
    my_subjects = list(self.subjects)
    their_subjects = list(other.subjects)
    if len(my_subjects) != len(their_subjects):
        mismatch += ('\n{indent}mismatching subject lengths '
                     '(self={} vs other={}): '
                     '\n{indent} self={}\n{indent} other={}'.format(
                         len(my_subjects), len(their_subjects),
                         my_subjects, their_subjects,
                         indent=sub_indent))
    else:
        for mine, theirs in zip(my_subjects, their_subjects):
            mismatch += mine.find_mismatch(theirs, indent=sub_indent)
    my_visits = list(self.visits)
    their_visits = list(other.visits)
    if len(my_visits) != len(their_visits):
        mismatch += ('\n{indent}mismatching visit lengths '
                     '(self={} vs other={}): '
                     '\n{indent} self={}\n{indent} other={}'.format(
                         len(my_visits), len(their_visits),
                         my_visits, their_visits,
                         indent=sub_indent))
    else:
        for mine, theirs in zip(my_visits, their_visits):
            mismatch += mine.find_mismatch(theirs, indent=sub_indent)
    return mismatch
Used in debugging unittests
51,688
def construct(cls, repository, filesets=(), fields=(), records=(),
              file_formats=(), **kwargs):
    """Build the hierarchical Tree of the filesets, fields and provenance
    records stored in *repository*, grouping items by (subject_id,
    visit_id) into Sessions, then Subjects, Visits and finally a Tree.
    """
    # Group filesets by (subject, visit); a None in either slot marks a
    # summary (per-subject / per-visit / per-study) item.
    filesets_dict = defaultdict(list)
    for fset in filesets:
        if file_formats:
            fset.set_format(file_formats)
        filesets_dict[(fset.subject_id, fset.visit_id)].append(fset)
    fields_dict = defaultdict(list)
    for field in fields:
        fields_dict[(field.subject_id, field.visit_id)].append(field)
    records_dict = defaultdict(list)
    for record in records:
        records_dict[(record.subject_id, record.visit_id)].append(record)
    subj_sessions = defaultdict(list)
    visit_sessions = defaultdict(list)
    # Only fully-qualified (subject, visit) keys become Sessions.
    for sess_id in set(chain(filesets_dict, fields_dict, records_dict)):
        if None in sess_id:
            continue
        subj_id, visit_id = sess_id
        session = Session(
            subject_id=subj_id, visit_id=visit_id,
            filesets=filesets_dict[sess_id],
            fields=fields_dict[sess_id],
            records=records_dict[sess_id])
        subj_sessions[subj_id].append(session)
        visit_sessions[visit_id].append(session)
    subjects = []
    for subj_id in subj_sessions:
        # (subj_id, None) keys hold the per-subject summary data.
        subjects.append(Subject(
            subj_id, sorted(subj_sessions[subj_id]),
            filesets_dict[(subj_id, None)],
            fields_dict[(subj_id, None)],
            records_dict[(subj_id, None)]))
    visits = []
    for visit_id in visit_sessions:
        # (None, visit_id) keys hold the per-visit summary data.
        visits.append(Visit(
            visit_id, sorted(visit_sessions[visit_id]),
            filesets_dict[(None, visit_id)],
            fields_dict[(None, visit_id)],
            records_dict[(None, visit_id)]))
    # (None, None) keys hold the per-study summary data.
    return Tree(sorted(subjects), sorted(visits), repository,
                filesets_dict[(None, None)],
                fields_dict[(None, None)],
                records_dict[(None, None)], **kwargs)
Return the hierarchical tree of the filesets and fields stored in a repository
51,689
def nodes(self, frequency=None):
    """Return the nodes related to this Session at *frequency*.

    'per_session' -> [self]; 'per_visit'/'per_subject' -> [self.parent];
    'per_study' -> [self.parent.parent]; None -> [] (no related nodes).
    """
    if frequency is None:
        # Fixed: the original built a bare list literal without returning
        # it, so this branch silently fell through and returned None.
        return []
    elif frequency == 'per_session':
        return [self]
    elif frequency in ('per_visit', 'per_subject'):
        return [self.parent]
    elif frequency == 'per_study':
        return [self.parent.parent]
Returns all nodes of the specified frequency that are related to the given Session
51,690
def write_data(self, data, response_required=None, timeout=5.0, raw=False):
    """Write *data* on the asyncio Protocol transport.

    Writes are dropped when the transport is absent or paused, and queued
    while a previous response is still outstanding.  Unless *raw*, a
    two-hex-digit checksum is appended and the leading length prefix is
    sanity-checked.  When *response_required* is set, a timeout callback
    is armed on the event loop.
    """
    if self._transport is None:
        return
    if self._paused:
        return
    if self._waiting_for_response:
        # A response is still pending; queue this write for later replay.
        LOG.debug("queueing write %s", data)
        self._queued_writes.append((data, response_required, timeout))
        return
    if response_required:
        self._waiting_for_response = response_required
        if timeout > 0:
            self._timeout_task = self.loop.call_later(
                timeout, self._response_required_timeout)
    if not raw:
        # Append the checksum byte (mod-256 complement of the char sum)
        # as two uppercase hex digits.
        cksum = 256 - reduce(lambda x, y: x + y, map(ord, data)) % 256
        data = data + '{:02X}'.format(cksum)
        # The first two hex chars encode the message length excluding
        # themselves; log (but still send) on mismatch.
        if int(data[0:2], 16) != len(data) - 2:
            LOG.debug("message length wrong: %s", data)
    LOG.debug("write_data '%s'", data)
    self._transport.write((data + '\r\n').encode())
Write data on the asyncio Protocol
51,691
def unwrap_querystring_lists(obj):
    """Flatten querystring params: single-element list values become the
    bare element; longer lists pass through unchanged.
    """
    flattened = {}
    for key in obj.keys():
        value = obj[key]
        flattened[key] = value[0] if len(value) == 1 else value
    return flattened
Convert responder querystring params pulling values out of list if there s only one .
51,692
def prerequisites(self):
    """Map each prerequisite pipeline-getter name to the set of this
    pipeline's input names that it derives.
    """
    prereqs = defaultdict(set)
    for pipeline_input in self.inputs:
        spec = self._study.spec(pipeline_input)
        if spec.is_spec and spec.derived:
            prereqs[spec.pipeline_getter].add(pipeline_input.name)
    return prereqs
Iterates through the inputs of the pipelinen and determines the all prerequisite pipelines
51,693
def add(self, name, interface, inputs=None, outputs=None, requirements=None,
        wall_time=None, annotations=None, **kwargs):
    """Add a processing node wrapping *interface* to the pipeline.

    *inputs*/*outputs* map node ports to study specs or to other nodes.
    An 'iterfield' kwarg creates a map node; 'joinsource' + 'joinfield'
    create a join node.  Returns the created node.

    Raises ArcanaDesignError on inconsistent iter/join kwargs or invalid
    spec connections.
    """
    if annotations is None:
        annotations = {}
    if requirements is None:
        requirements = []
    if wall_time is None:
        wall_time = self.study.processor.default_wall_time
    if 'mem_gb' not in kwargs or kwargs['mem_gb'] is None:
        kwargs['mem_gb'] = self.study.processor.default_mem_gb
    if 'iterfield' in kwargs:
        if 'joinfield' in kwargs or 'joinsource' in kwargs:
            # Fixed: was '.foramt(...)', which raised AttributeError
            # instead of the intended ArcanaDesignError.
            raise ArcanaDesignError(
                "Cannot provide both joinsource and iterfield to when "
                "attempting to add '{}' node to {}".format(
                    name, self._error_msg_loc))
        node_cls = self.study.environment.node_types['map']
    elif 'joinsource' in kwargs or 'joinfield' in kwargs:
        if not ('joinfield' in kwargs and 'joinsource' in kwargs):
            raise ArcanaDesignError(
                "Both joinsource and joinfield kwargs are required to "
                "create a JoinNode (see {})".format(
                    name, self._error_msg_loc))
        joinsource = kwargs['joinsource']
        if joinsource in self.study.ITERFIELDS:
            self._iterator_joins.add(joinsource)
        node_cls = self.study.environment.node_types['join']
        # Prefix the joinsource with the pipeline name to avoid name
        # clashes across pipelines.
        kwargs['joinsource'] = '{}_{}'.format(self.name, joinsource)
    else:
        node_cls = self.study.environment.node_types['base']
    node = node_cls(self.study.environment, interface,
                    name="{}_{}".format(self._name, name),
                    requirements=requirements, wall_time=wall_time,
                    annotations=annotations, **kwargs)
    self._workflow.add_nodes([node])
    if inputs is not None:
        assert isinstance(inputs, dict)
        for node_input, connect_from in inputs.items():
            if isinstance(connect_from[0], basestring):
                # (spec_name, format) pair -> connect from a study spec.
                input_spec, input_format = connect_from
                self.connect_input(input_spec, node, node_input,
                                   input_format)
            else:
                # (node, field) pair -> connect from another node.
                conn_node, conn_field = connect_from
                self.connect(conn_node, conn_field, node, node_input)
    if outputs is not None:
        assert isinstance(outputs, dict)
        for output_spec, (node_output, output_format) in outputs.items():
            self.connect_output(output_spec, node, node_output,
                                output_format)
    return node
Adds a processing Node to the pipeline
51,694
def connect_input(self, spec_name, node, node_input, format=None, **kwargs):
    """Connect a study fileset/field spec (or study iterator) as an input
    of *node*'s *node_input* port.

    Raises ArcanaDesignError when the mapped name is not a valid spec.
    """
    if spec_name in self.study.ITERFIELDS:
        self._iterator_conns[spec_name].append((node, node_input, format))
        return
    name = self._map_name(spec_name, self._input_map)
    if name not in self.study.data_spec_names():
        raise ArcanaDesignError(
            "Proposed input '{}' to {} is not a valid spec name ('{}')"
            .format(name, self._error_msg_loc,
                    "', '".join(self.study.data_spec_names())))
    self._input_conns[name].append((node, node_input, format, kwargs))
Connects a study fileset_spec as an input to the provided node
51,695
def connect_output(self, spec_name, node, node_output, format=None, **kwargs):
    """Connect *node*'s *node_output* port to a study fileset/field spec,
    logging when an earlier connection for the same spec is replaced.

    Raises ArcanaDesignError when the mapped name is not a valid spec.
    """
    name = self._map_name(spec_name, self._output_map)
    if name not in self.study.data_spec_names():
        raise ArcanaDesignError(
            "Proposed output '{}' to {} is not a valid spec name ('{}')"
            .format(name, self._error_msg_loc,
                    "', '".join(self.study.data_spec_names())))
    if name in self._output_conns:
        prev_node, prev_node_output, _, _ = self._output_conns[name]
        logger.info(
            "Reassigning '{}' output from {}:{} to {}:{} in {}".format(
                name, prev_node.name, prev_node_output,
                node.name, node_output, self._error_msg_loc))
    self._output_conns[name] = (node, node_output, format, kwargs)
Connects an output to a study fileset spec
51,696
def _map_name(self, name, mapper):
    """Translate *name* through *mapper*: a string mapper is prepended as
    a prefix, a dict-like mapper substitutes (missing keys pass through),
    and None leaves the name untouched.
    """
    if mapper is not None:
        if isinstance(mapper, basestring):
            name = mapper + name
        else:
            try:
                name = mapper[name]
            except KeyError:
                pass
    return name
Maps a spec name to a new value based on the provided mapper
51,697
def requires_conversion(cls, fileset, file_format):
    """Return True when *fileset*'s format differs from *file_format*
    (False when no target format is given or the fileset has no format).
    """
    if file_format is None:
        return False
    try:
        current_format = fileset.format
    except AttributeError:
        return False
    return file_format != current_format
Checks whether the fileset matches the requested file format
51,698
def save_graph(self, fname, style='flat', format='png', **kwargs):
    """Render the pipeline's workflow graph to *fname* (PNG), preferring
    the detailed graph and falling back to the simple one.
    """
    fname = os.path.expanduser(fname)
    if not fname.endswith('.png'):
        fname += '.png'
    orig_dir = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    os.chdir(tmpdir)
    workflow = self._workflow
    workflow.write_graph(graph2use=style, format=format, **kwargs)
    os.chdir(orig_dir)
    detailed = os.path.join(tmpdir, 'graph_detailed.{}'.format(format))
    try:
        shutil.move(detailed, fname)
    except IOError as e:
        # No detailed graph was produced; fall back to the plain one.
        if e.errno != errno.ENOENT:
            raise
        shutil.move(os.path.join(tmpdir, 'graph.{}'.format(format)), fname)
    shutil.rmtree(tmpdir)
Saves a graph of the pipeline to file
51,699
def cap(self):
    """Finalise pipeline construction: build input/output nodes for every
    frequency and generate provenance.

    The three caches must be set (or unset) together; raises ArcanaError
    when they are partially initialised.
    """
    caches = (self._inputnodes, self._outputnodes, self._prov)
    if caches == (None, None, None):
        self._inputnodes = {
            f: self._make_inputnode(f) for f in self.input_frequencies}
        self._outputnodes = {
            f: self._make_outputnode(f) for f in self.output_frequencies}
        self._prov = self._gen_prov()
    elif None in caches:
        raise ArcanaError(
            "If one of _inputnodes, _outputnodes or _prov is not None then"
            " they all should be in {}".format(self))
Caps the construction of the pipeline signifying that no more inputs and outputs are expected to be added and therefore the input and output nodes can be created along with the provenance .