| idx (int64, 0-63k) | question (string, 53-5.28k chars) | target (string, 5-805 chars) |
|---|---|---|
14,000 | def get_sequences ( self , pdb_id = None ) : sequences = { } if pdb_id : for chain_id , sequence in self . get ( pdb_id , { } ) . iteritems ( ) : sequences [ chain_id ] = Sequence . from_sequence ( chain_id , sequence ) else : for pdb_id , v in self . iteritems ( ) : sequences [ pdb_id ] = { } for chain_id , sequence in v . iteritems ( ) : sequences [ pdb_id ] [ chain_id ] = Sequence . from_sequence ( chain_id , sequence ) return sequences | Create Sequence objects for each FASTA sequence . |
14,001 | def get_chain_ids ( self , pdb_id = None , safe_call = False ) : if pdb_id == None and len ( self . keys ( ) ) == 1 : return self [ self . keys ( ) [ 0 ] ] . keys ( ) pdbID = pdb_id . upper ( ) if not self . get ( pdbID ) : if not safe_call : raise Exception ( "FASTA object does not contain sequences for PDB %s." % pdbID ) else : return [ ] return self [ pdbID ] . keys ( ) | If the FASTA file only has one PDB ID pdb_id does not need to be specified . Otherwise the list of chain identifiers for pdb_id is returned . |
14,002 | def match ( self , other ) : colortext . message ( "FASTA Match" ) for frompdbID , fromchains in sorted ( self . iteritems ( ) ) : matched_pdbs = { } matched_chains = { } for fromchain , fromsequence in fromchains . iteritems ( ) : for topdbID , tochains in other . iteritems ( ) : for tochain , tosequence in tochains . iteritems ( ) : if fromsequence == tosequence : matched_pdbs [ topdbID ] = matched_pdbs . get ( topdbID , set ( ) ) matched_pdbs [ topdbID ] . add ( fromchain ) matched_chains [ fromchain ] = matched_chains . get ( fromchain , [ ] ) matched_chains [ fromchain ] . append ( ( topdbID , tochain ) ) foundmatches = [ ] colortext . printf ( " %s" % frompdbID , color = "silver" ) for mpdbID , mchains in matched_pdbs . iteritems ( ) : if mchains == set ( fromchains . keys ( ) ) : foundmatches . append ( mpdbID ) colortext . printf ( " PDB %s matched PDB %s on all chains" % ( mpdbID , frompdbID ) , color = "white" ) if foundmatches : for fromchain , fromsequence in fromchains . iteritems ( ) : colortext . printf ( " %s" % ( fromchain ) , color = "silver" ) colortext . printf ( " %s" % ( fromsequence ) , color = self . unique_sequences [ fromsequence ] ) mstr = [ ] for mchain in matched_chains [ fromchain ] : if mchain [ 0 ] in foundmatches : mstr . append ( "%s chain %s" % ( mchain [ 0 ] , mchain [ 1 ] ) ) colortext . printf ( " Matches: %s" % ", " . join ( mstr ) ) else : colortext . error ( " No matches found." ) | This is a noisy terminal - printing function at present since there is no need to make it a proper API function . |
14,003 | def _process_json_data ( person_data ) : person = SwsPerson ( ) if person_data [ "BirthDate" ] : person . birth_date = parse ( person_data [ "BirthDate" ] ) . date ( ) person . directory_release = person_data [ "DirectoryRelease" ] person . email = person_data [ "Email" ] person . employee_id = person_data [ "EmployeeID" ] person . first_name = person_data [ "FirstName" ] person . gender = person_data [ "Gender" ] person . last_name = person_data [ "LastName" ] person . student_name = person_data [ "StudentName" ] if person_data [ "LastEnrolled" ] is not None : last_enrolled = LastEnrolled ( ) last_enrolled . href = person_data [ "LastEnrolled" ] [ "Href" ] last_enrolled . quarter = person_data [ "LastEnrolled" ] [ "Quarter" ] last_enrolled . year = person_data [ "LastEnrolled" ] [ "Year" ] person . last_enrolled = last_enrolled if person_data [ "LocalAddress" ] is not None : address_data = person_data [ "LocalAddress" ] local_address = StudentAddress ( ) local_address . city = address_data [ "City" ] local_address . country = address_data [ "Country" ] local_address . street_line1 = address_data [ "Line1" ] local_address . street_line2 = address_data [ "Line2" ] local_address . postal_code = address_data [ "PostalCode" ] local_address . state = address_data [ "State" ] local_address . zip_code = address_data [ "Zip" ] person . local_address = local_address person . local_phone = person_data [ "LocalPhone" ] if person_data [ "PermanentAddress" ] is not None : perm_address_data = person_data [ "PermanentAddress" ] permanent_address = StudentAddress ( ) permanent_address . city = perm_address_data [ "City" ] permanent_address . country = perm_address_data [ "Country" ] permanent_address . street_line1 = perm_address_data [ "Line1" ] permanent_address . street_line2 = perm_address_data [ "Line2" ] permanent_address . postal_code = perm_address_data [ "PostalCode" ] permanent_address . state = perm_address_data [ "State" ] permanent_address . zip_code = perm_address_data [ "Zip" ] person . permanent_address = permanent_address person . permanent_phone = person_data [ "PermanentPhone" ] person . uwregid = person_data [ "RegID" ] person . student_number = person_data [ "StudentNumber" ] person . student_system_key = person_data [ "StudentSystemKey" ] person . uwnetid = person_data [ "UWNetID" ] person . visa_type = person_data [ "VisaType" ] return person | Returns a uw_sws . models . SwsPerson object |
14,004 | def _make_notice_date ( self , response ) : today = date . today ( ) yesterday = today - timedelta ( days = 1 ) tomorrow = today + timedelta ( days = 1 ) week = today + timedelta ( days = 2 ) next_week = today + timedelta ( weeks = 1 ) future = today + timedelta ( weeks = 3 ) future_end = today + timedelta ( weeks = 5 ) json_data = json . loads ( response . data ) for notice in json_data [ "Notices" ] : if notice [ "NoticeAttributes" ] and len ( notice [ "NoticeAttributes" ] ) > 0 : for attr in notice [ "NoticeAttributes" ] : if attr [ "DataType" ] == "date" : if attr [ "Value" ] == "yesterday" : attr [ "Value" ] = yesterday . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "today" : attr [ "Value" ] = today . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "tomorrow" : attr [ "Value" ] = tomorrow . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "future" : attr [ "Value" ] = future . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "future_end" : attr [ "Value" ] = future_end . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "next_week" : attr [ "Value" ] = next_week . strftime ( "%Y%m%d" ) elif attr [ "Value" ] == "week" : attr [ "Value" ] = week . strftime ( "%Y%m%d" ) else : pass response . data = json . dumps ( json_data ) | Set the date attribute value in the notice mock data |
14,005 | def relative_symlink ( target , link_name ) : link_name = os . path . abspath ( link_name ) abs_target = os . path . abspath ( target ) rel_target = os . path . relpath ( target , os . path . dirname ( link_name ) ) if os . path . exists ( link_name ) : os . remove ( link_name ) os . symlink ( rel_target , link_name ) | Make a symlink to target using the shortest possible relative path . |
14,006 | def params ( self , dict ) : self . _configuration . update ( dict ) self . _measurements . update ( ) | Set configuration variables for an OnShape part . |
14,007 | def update ( self , params = None , client = c ) : uri = self . parent . uri if not params or not self . res : self . get_params ( ) return d = self . payload for k , v in params . items ( ) : m = d [ "currentConfiguration" ] [ self . parameter_map [ k ] ] [ "message" ] if isinstance ( v , bool ) or isinstance ( v , str ) : m [ "value" ] = v else : try : m [ "expression" ] = str ( v ) except KeyError : m [ "value" ] = str ( v ) res = client . update_configuration ( uri . did , uri . wvm , uri . eid , json . dumps ( d ) ) if res . status_code == 200 : self . res = res | Push params to OnShape and synchronize the local copy |
14,008 | def get_params ( self ) : self . res = c . get_configuration ( self . parent . uri . as_dict ( ) ) | Manually pull params defined in config from OnShape and return a python representation of the params . Quantities are converted to pint quantities Bools are converted to python bools and Enums are converted to strings . Note that Enum names are autogenerated by OnShape and do not match the name on the OnShape UI . |
14,009 | def params ( self ) : payload = self . payload d = { } for i , p in enumerate ( payload [ "currentConfiguration" ] ) : type_name = p [ "typeName" ] cp = payload [ "configurationParameters" ] [ i ] [ "message" ] name = cp [ "parameterName" ] if type_name == "BTMParameterQuantity" : try : v = q ( p [ "message" ] [ "expression" ] ) except : v = q ( p [ "message" ] [ "value" ] , p [ "message" ] [ "units" ] ) elif type_name == "BTMParameterBoolean" : v = p [ "message" ] [ "value" ] elif type_name == "BTMParameterEnum" : enum = p [ "message" ] [ "value" ] enum_map = { d [ 'message' ] [ 'option' ] : i for i , d in enumerate ( cp [ 'options' ] ) } v = cp [ 'options' ] [ enum_map [ enum ] ] [ 'message' ] [ 'optionName' ] d [ name ] = v return d | Get the params of response data from the API . |
14,010 | def update ( self ) : uri = self . parent . uri script = r self . res = c . evaluate_featurescript ( uri . as_dict ( ) , script ) | Update all local variable names to match OnShape . |
14,011 | def getFailedJobIDs ( self , extraLapse = TYPICAL_LAPSE ) : scriptsRun = self . scriptsRun failedJobTimestamps = [ ] nodata = [ ] for name , details in sorted ( scriptsRun . iteritems ( ) ) : if details [ "lastSuccess" ] and expectedScripts . get ( name ) : if not expectedScripts . check ( name , details [ "lastSuccess" ] , extraLapse ) : if details [ "lastRun" ] : failedJobTimestamps . append ( details [ "lastRun" ] ) else : nodata . append ( name ) continue else : if details [ "lastRun" ] : failedJobTimestamps . append ( details [ "lastRun" ] ) else : nodata . append ( name ) continue if details [ "status" ] & RETROSPECT_FAIL : failedJobTimestamps . append ( details [ "lastRun" ] ) elif details [ "status" ] & RETROSPECT_WARNING : failedJobTimestamps . append ( details [ "lastRun" ] ) return failedJobTimestamps , nodata | Returns a list of timestamps which identify failed jobs in the scriptsRun table . If a time stamp for a job can be found we return this . The time stamp can be used to index the log . If no time stamp was found return the name of the script instead . |
14,012 | def generateSummaryHTMLTable ( self , extraLapse = TYPICAL_LAPSE ) : scriptsRun = self . scriptsRun html = [ ] html . append ( "<table style='text-align:center;border:1px solid black;margin-left: auto;margin-right: auto;'>\n" ) html . append ( ' <tr><td colspan="4" style="text-align:center"></td></tr>\n' ) html . append ( ' <tr style="font-weight:bold;background-color:#cccccc;text-align:center"><td>Script</td><td>Last status</td><td>Last run</td><td>Last success</td></tr>\n' ) tablestyle = [ 'background-color:#33dd33;' , 'background-color:#33ff33;' ] warningstyle = [ 'background-color:#EA8737;' , 'background-color:#f5b767;' ] failstyle = [ 'background-color:#dd3333;' , 'background-color:#ff3333;' ] count = 0 for name , details in sorted ( scriptsRun . iteritems ( ) ) : status = None rowstyle = tablestyle [ count % 2 ] if details [ "lastSuccess" ] and expectedScripts . get ( name ) : if not expectedScripts . check ( name , details [ "lastSuccess" ] , extraLapse ) : status = "STOPPED" else : rowstyle = failstyle [ count % 2 ] status = "FAIL" laststatusstyle = tablestyle [ count % 2 ] if details [ "status" ] & RETROSPECT_FAIL : laststatusstyle = failstyle [ count % 2 ] status = "FAIL" elif status != "STOPPED" and details [ "status" ] & RETROSPECT_WARNING : laststatusstyle = warningstyle [ count % 2 ] status = "WARNINGS" elif status != "FAIL" and status != "STOPPED" : status = "OK" html . append ( '<tr style="text-align:left;%s">\n' % rowstyle ) if status == "STOPPED" : html . append ( '\t<td style="%s">%s</td>\n' % ( failstyle [ count % 2 ] , name ) ) else : html . append ( '\t<td style="%s">%s</td>' % ( tablestyle [ count % 2 ] , name ) ) if details [ "lastRun" ] : if status == "STOPPED" : html . append ( '\t<td style="%s"><a href="#%s">%s</a></td>\n' % ( failstyle [ count % 2 ] , self . createAnchorID ( name , details [ "lastRun" ] ) , status ) ) else : html . append ( '\t<td style="%s"><a href="#%s">%s</a></td>\n' % ( laststatusstyle , self . createAnchorID ( name , details [ "lastRun" ] ) , status ) ) else : html . append ( '\t<td style="%s">%s</td>\n' % ( laststatusstyle , status ) ) if details [ "lastRun" ] : html . append ( '\t<td style="%s"><a href="#%s">%s</a></td>\n' % ( laststatusstyle , self . createAnchorID ( name , details [ "lastRun" ] ) , details [ "lastRun" ] ) ) else : html . append ( '\t<td style="%s">none found</td>\n' % laststatusstyle ) if details [ "lastSuccess" ] : html . append ( '\t<td><a href="#%s">%s</a></td>\n' % ( self . createAnchorID ( name , details [ "lastSuccess" ] ) , details [ "lastSuccess" ] ) ) else : html . append ( '\t<td>none found</td>\n' ) html . append ( '</tr>\n' ) count += 1 html . append ( "</table>" ) return html | Generates a summary in HTML of the status of the expected scripts broken based on the log . This summary is returned as a list of strings . |
14,013 | def to_csv ( args ) : result_file = args . result_file output_file = args . output_file delimiter = args . delimiter if not os . path . isfile ( result_file ) : raise OSError ( "Results file does not exists" ) headers = [ 'elapsed' , 'epoch' , 'turret_name' , 'scriptrun_time' , 'error' ] headers_row = { } set_database ( result_file , db , { } ) results = Result . select ( ) for item in results : result_item = item . to_dict ( ) for k in result_item [ 'custom_timers' ] . keys ( ) : if k not in headers : headers . append ( k ) headers_row [ k ] = k with open ( output_file , "w+" ) as f : writer = csv . DictWriter ( f , fieldnames = headers , delimiter = delimiter ) headers_row . update ( { 'elapsed' : 'elapsed time' , 'epoch' : 'epoch (in seconds)' , 'turret_name' : 'turret name' , 'scriptrun_time' : 'transaction time' , 'error' : 'error' } ) writer . writerow ( headers_row ) for result_item in results : line = result_item . to_dict ( ) for key , value in line [ 'custom_timers' ] . items ( ) : line [ key ] = value del line [ 'custom_timers' ] writer . writerow ( line ) | Take a sqlite filled database of results and return a csv file |
14,014 | def fraction_correct_fuzzy_linear_create_vector ( z , z_cutoff , z_fuzzy_range ) : assert ( z_fuzzy_range * 2 < z_cutoff ) if ( z == None or numpy . isnan ( z ) ) : return None elif ( z >= z_cutoff + z_fuzzy_range ) : return [ 0 , 0 , 1 ] elif ( z <= - z_cutoff - z_fuzzy_range ) : return [ 1 , 0 , 0 ] elif ( - z_cutoff + z_fuzzy_range <= z <= z_cutoff - z_fuzzy_range ) : return [ 0 , 1 , 0 ] elif ( - z_cutoff - z_fuzzy_range < z < - z_cutoff + z_fuzzy_range ) : neutrality = ( z + z_cutoff + z_fuzzy_range ) / ( z_fuzzy_range * 2 ) zvec = [ 1 - neutrality , neutrality , 0 ] elif ( z_cutoff - z_fuzzy_range < z < z_cutoff + z_fuzzy_range ) : positivity = ( z - z_cutoff + z_fuzzy_range ) / ( z_fuzzy_range * 2 ) zvec = [ 0 , 1 - positivity , positivity ] else : raise Exception ( 'Logical error.' ) length = math . sqrt ( numpy . dot ( zvec , zvec ) ) return numpy . divide ( zvec , length ) | A helper function for fraction_correct_fuzzy_linear . |
14,015 | def apply_quality_control_checks ( seq , check_gen9_seqs = True , check_short_length = True , check_local_gc_content = True , check_global_gc_content = True ) : seq = seq . upper ( ) failure_reasons = [ ] if check_short_length : if len ( seq ) < min_gene_length : failure_reasons . append ( 'minimum_length: Sequence is %d bp long and needs to be at least %d bp' % ( len ( seq ) , min_gene_length ) ) if len ( seq ) > max_gene_length : failure_reasons . append ( 'maximum_length: Sequence is %d bp long and needs to be shorter than %d bp' % ( len ( seq ) , max_gene_length ) ) if check_gen9_seqs : for site in reserved_restriction_sites : pattern = dna . dna_to_re ( site ) reverse_site = dna . reverse_complement ( site ) reverse_pattern = dna . dna_to_re ( reverse_site ) if pattern . match ( seq ) : failure_reasons . append ( 'gen9_restricted_sequences: Reserved sequence %s is present' % ( site ) ) if reverse_pattern . match ( seq ) : failure_reasons . append ( 'gen9_restricted_sequences: Reverse-complement of reserved sequence %s is present' % ( site ) ) if check_global_gc_content : gc_content = dna . gc_content ( seq ) if gc_content < global_gc_content_min : failure_reasons . append ( 'global_gc_content_min: Global GC content is %.3f%% and must be at least %.3f%%' % ( gc_content , global_gc_content_min ) ) if gc_content > global_gc_content_max : failure_reasons . append ( 'global_gc_content_max: Global GC content is %.3f%% and must be less than %.3f%%' % ( gc_content , global_gc_content_max ) ) if check_local_gc_content : windows = [ seq ] if local_gc_window_size < len ( seq ) : windows = dna . sliding_window ( seq , local_gc_window_size ) for seq_window in windows : lgc_content = dna . gc_content ( seq_window ) if lgc_content < local_gc_content_min : failure_reasons . append ( 'local_gc_content_min: Local GC content is %.3f%% and must be at least %.3f%%' % ( lgc_content , local_gc_content_min ) ) break if lgc_content > local_gc_content_max : failure_reasons . append ( 'local_gc_content_max: Local GC content is %.3f%% and must be less than %.3f%%' % ( lgc_content , local_gc_content_max ) ) break for base in dna . dna_bases : homopolymer = base * homopolymer_max_lengths [ base ] if homopolymer in seq : failure_reasons . append ( 'max_%s_homopolymer: %s' % ( base . lower ( ) , dna . case_highlight ( seq , homopolymer ) ) ) if failure_reasons : intro = "The given sequence fails the following Gen9 design guidelines:" raise ValueError ( '\n' . join ( [ intro ] + failure_reasons ) ) | Raise a ValueError if the given sequence doesn't pass all of the Gen9 quality control design guidelines . Certain checks can be enabled or disabled via the command line . |
14,016 | def get_default_values ( self ) : out = dict ( dx = 0 , dy = 0 , dz = 0 , theta = 0 , phi = 0 , psi = 0 ) dx , dy , dz , _ = np . mean ( self . coord1 - self . coord2 , axis = 1 ) out [ 'dx' ] = dx out [ 'dy' ] = dy out [ 'dz' ] = dz vec1 = self . coord1 [ : - 1 , 1 ] - self . coord1 [ : - 1 , - 1 ] vec2 = self . coord2 [ : - 1 , 1 ] - self . coord2 [ : - 1 , - 1 ] vec1 /= np . linalg . norm ( vec1 ) vec2 /= np . linalg . norm ( vec2 ) v = np . cross ( vec1 , vec2 ) s = np . linalg . norm ( v ) + np . finfo ( DTYPE ) . eps c = vec1 . dot ( vec2 ) vx = np . array ( [ [ 0 , - v [ 2 ] , v [ 1 ] ] , [ v [ 2 ] , 0 , - v [ 0 ] ] , [ - v [ 1 ] , v [ 0 ] , 0 ] ] , dtype = DTYPE ) rotation_matrix = np . eye ( 3 ) + vx + vx . dot ( vx ) * ( 1 - c ) / ( s * s ) out [ 'theta' ] = math . atan2 ( rotation_matrix [ 2 , 1 ] , rotation_matrix [ 2 , 2 ] ) out [ 'phi' ] = math . atan2 ( - rotation_matrix [ 2 , 0 ] , math . hypot ( rotation_matrix [ 2 , 1 ] , rotation_matrix [ 2 , 2 ] ) ) out [ 'psi' ] = math . atan2 ( rotation_matrix [ 1 , 0 ] , rotation_matrix [ 0 , 0 ] ) return out | Make a crude estimation of the alignment using the center of mass and general C - > N orientation . |
14,017 | def get_matrix ( theta , phi , psi , dx , dy , dz , matrix = np . zeros ( ( 4 , 4 ) , dtype = DTYPE ) , angles = np . zeros ( 3 , dtype = DTYPE ) ) : angles [ 0 ] = theta angles [ 1 ] = phi angles [ 2 ] = psi cx , cy , cz = np . cos ( angles ) sx , sy , sz = np . sin ( angles ) rotation = matrix [ : 3 , : 3 ] rotation . flat = ( cx * cz - sx * cy * sz , cx * sz + sx * cy * cz , sx * sy , - sx * cz - cx * cy * sz , - sx * sz + cx * cy * cz , cx * sy , sy * sz , - sy * cz , cy ) matrix [ : 3 , 3 ] = dx , dy , dz matrix [ 3 , 3 ] = 1. return matrix | Build the rotation - translation matrix . |
14,018 | def _tm ( self , theta , phi , psi , dx , dy , dz ) : matrix = self . get_matrix ( theta , phi , psi , dx , dy , dz ) coord = matrix . dot ( self . coord2 ) dist = coord - self . coord1 d_i2 = ( dist * dist ) . sum ( axis = 0 ) tm = - ( 1 / ( 1 + ( d_i2 / self . d02 ) ) ) return tm | Compute the minimisation target not normalised . |
14,019 | def write ( self , outputfile = 'out.pdb' , appended = False ) : matrix = self . get_matrix ( ** self . get_current_values ( ) ) out = open ( outputfile , 'w' ) atomid = 1 if appended : for line in open ( self . pdb1 ) : if not line . startswith ( 'ATOM' ) or ( line [ 21 ] != self . chain_1 and line [ 21 ] != ' ' ) : continue out . write ( line [ : 7 ] ) out . write ( '{: >4}' . format ( atomid ) ) atomid += 1 out . write ( line [ 11 : 21 ] ) out . write ( 'A' ) out . write ( line [ 22 : ] ) for line in open ( self . pdb2 ) : if not line . startswith ( 'ATOM' ) or ( line [ 21 ] != self . chain_2 and line [ 21 ] != ' ' ) : continue x = float ( line [ 30 : 38 ] ) y = float ( line [ 38 : 46 ] ) z = float ( line [ 46 : 54 ] ) vec = np . array ( [ x , y , z , 1 ] ) x , y , z , _ = matrix . dot ( vec ) out . write ( line [ : 7 ] ) out . write ( '{: >4}' . format ( atomid ) ) atomid += 1 out . write ( line [ 11 : 21 ] ) out . write ( 'B' ) out . write ( line [ 22 : 30 ] ) out . write ( '{:>8.3f}{:>8.3f}{:>8.3f}' . format ( x , y , z ) ) out . write ( line [ 54 : ] ) out . close ( ) | Save the second PDB file aligned to the first . |
14,020 | def _load_data_alignment ( self , chain1 , chain2 ) : parser = PDB . PDBParser ( QUIET = True ) ppb = PDB . PPBuilder ( ) structure1 = parser . get_structure ( chain1 , self . pdb1 ) structure2 = parser . get_structure ( chain2 , self . pdb2 ) seq1 = str ( ppb . build_peptides ( structure1 ) [ 0 ] . get_sequence ( ) ) seq2 = str ( ppb . build_peptides ( structure2 ) [ 0 ] . get_sequence ( ) ) align = pairwise2 . align . globalms ( seq1 , seq2 , 2 , - 1 , - 0.5 , - 0.1 ) [ 0 ] indexes = set ( i for i , ( s1 , s2 ) in enumerate ( zip ( align [ 0 ] , align [ 1 ] ) ) if s1 != '-' and s2 != '-' ) coord1 = np . hstack ( [ np . concatenate ( ( r [ 'CA' ] . get_coord ( ) , ( 1 , ) ) ) [ : , None ] for i , r in enumerate ( structure1 . get_residues ( ) ) if i in indexes and 'CA' in r ] ) . astype ( DTYPE , copy = False ) coord2 = np . hstack ( [ np . concatenate ( ( r [ 'CA' ] . get_coord ( ) , ( 1 , ) ) ) [ : , None ] for i , r in enumerate ( structure2 . get_residues ( ) ) if i in indexes and 'CA' in r ] ) . astype ( DTYPE , copy = False ) self . coord1 = coord1 self . coord2 = coord2 self . N = len ( seq1 ) | Extract the sequences from the PDB file perform the alignment and load the coordinates of the CA of the common residues . |
14,021 | def _load_data_index ( self , chain1 , chain2 ) : parser = PDB . PDBParser ( QUIET = True ) structure1 = parser . get_structure ( chain1 , self . pdb1 ) structure2 = parser . get_structure ( chain2 , self . pdb2 ) residues1 = list ( structure1 . get_residues ( ) ) residues2 = list ( structure2 . get_residues ( ) ) indexes1 = set ( r . id [ 1 ] for r in residues1 ) indexes2 = set ( r . id [ 1 ] for r in residues2 ) indexes = indexes1 . intersection ( indexes2 ) self . indexes = indexes . copy ( ) self . N = len ( indexes ) coord1 = [ ] indexes1 = indexes . copy ( ) for r in residues1 : if r . id [ 1 ] in indexes1 and 'CA' in r : coord1 . append ( np . concatenate ( ( r [ 'CA' ] . get_coord ( ) , ( 1 , ) ) ) [ : , None ] ) indexes1 . remove ( r . id [ 1 ] ) coord1 = np . hstack ( coord1 ) . astype ( DTYPE , copy = False ) coord2 = [ ] for r in residues2 : if r . id [ 1 ] in indexes and 'CA' in r : coord2 . append ( np . concatenate ( ( r [ 'CA' ] . get_coord ( ) , ( 1 , ) ) ) [ : , None ] ) indexes . remove ( r . id [ 1 ] ) coord2 = np . hstack ( coord2 ) . astype ( DTYPE , copy = False ) self . coord1 = coord1 self . coord2 = coord2 | Load the coordinates of the CA of the common residues . |
14,022 | def _json_to_sectionstatus ( section_data ) : section_status = SectionStatus ( ) if section_data [ "AddCodeRequired" ] == 'true' : section_status . add_code_required = True else : section_status . add_code_required = False section_status . current_enrollment = int ( section_data [ "CurrentEnrollment" ] ) current_period = int ( section_data [ "CurrentRegistrationPeriod" ] ) section_status . current_registration_period = current_period if section_data [ "FacultyCodeRequired" ] == 'true' : section_status . faculty_code_required = True else : section_status . faculty_code_required = False limit_estimate = int ( section_data [ "LimitEstimateEnrollment" ] ) section_status . limit_estimated_enrollment = limit_estimate indicator = section_data [ "LimitEstimateEnrollmentIndicator" ] section_status . limit_estimate_enrollment_indicator = indicator section_status . room_capacity = int ( section_data [ "RoomCapacity" ] ) section_status . sln = int ( section_data [ "SLN" ] ) section_status . space_available = int ( section_data [ "SpaceAvailable" ] ) if section_data [ "Status" ] == "open" : section_status . is_open = True else : section_status . is_open = False return section_status | Returns a uw_sws . models . SectionStatus object created from the passed json . |
14,023 | def renumber_atoms ( lines ) : new_lines = [ ] current_number = 1 for line in lines : if line . startswith ( 'ATOM' ) or line . startswith ( 'HETATM' ) : new_lines . append ( line [ : 6 ] + string . rjust ( '%d' % current_number , 5 ) + line [ 11 : ] ) current_number += 1 else : if line . startswith ( 'TER' ) : current_number += 1 new_lines . append ( line ) return new_lines | Takes in a list of PDB lines and renumbers the atoms appropriately |
14,024 | def clean_alternate_location_indicators ( lines ) : new_lines = [ ] previously_seen_alt_atoms = set ( ) for line in lines : if line . startswith ( 'ATOM' ) : alt_loc_id = line [ 16 ] if alt_loc_id != ' ' : atom_name = line [ 12 : 16 ] . strip ( ) res_name = line [ 17 : 20 ] . strip ( ) chain = line [ 21 ] resnum = long ( line [ 22 : 26 ] . strip ( ) ) loc_tup = ( atom_name , res_name , chain , resnum ) if loc_tup in previously_seen_alt_atoms : continue else : previously_seen_alt_atoms . add ( loc_tup ) line = line [ : 16 ] + ' ' + line [ 17 : ] new_lines . append ( line ) return new_lines | Keeps only the first atom if alternate location identifiers are being used . Removes the alternate location ID character . |
14,025 | def parse_pdb_ligand_info ( self , pdb_ligand_info ) : mtchs = re . findall ( '(<ligand.*?</ligand>)' , pdb_ligand_info , re . DOTALL ) for m in mtchs : if m . upper ( ) . find ( 'CHEMICALID="{0}"' . format ( self . PDBCode . upper ( ) ) ) != - 1 : ligand_type = re . match ( '<ligand.*?\stype="(.*?)".*?>' , m , re . DOTALL ) if ligand_type : self . LigandType = ligand_type . group ( 1 ) | This only parses the ligand type as all the other information should be in the . cif file . The XML file has proper capitalization whereas the . cif file uses all caps for the ligand type . |
14,026 | def add_code_mapping ( self , from_pdb_code , to_pdb_code ) : if from_pdb_code in self . code_map : assert ( self . code_map [ from_pdb_code ] == to_pdb_code ) else : self . code_map [ from_pdb_code ] = to_pdb_code | Add a code mapping without a given instance . |
14,027 | def reset_password ( self , action_token , signed_data ) : try : action = "reset-password" user = get_user_by_action_token ( action , action_token ) if not user or not user . signed_data_match ( signed_data , action ) : raise mocha_exc . AppError ( "Verification Invalid!" ) if request . method == "POST" : password = request . form . get ( "password" , "" ) . strip ( ) password_confirm = request . form . get ( "password_confirm" , "" ) . strip ( ) if not password or password != password_confirm : raise exceptions . AuthError ( "Password is missing or passwords don't match" ) user . change_password ( password ) user . set_email_verified ( True ) session_set_require_password_change ( False ) flash_success ( "Password updated successfully!" ) return redirect ( __options__ . get ( "login_view" ) or self . login ) return { "action_token" : action_token , "signed_data" : signed_data } except ( mocha_exc . AppError , exceptions . AuthError ) as ex : flash_error ( str ( ex ) ) except Exception as e : logging . exception ( e ) flash_error ( "Unable to reset password" ) return redirect ( self . login ) | Reset the user password . It was triggered by LOST - PASSWORD |
14,028 | def verify_email ( self , action_token , signed_data ) : try : action = "verify-email" user = get_user_by_action_token ( action , action_token ) if not user or not user . signed_data_match ( signed_data , action ) : raise mocha_exc . AppError ( "Verification Invalid!" ) else : user . set_email_verified ( True ) flash_success ( "Account verified. You can now login" ) username = user . username if user . login_method == "email" : username = user . email return redirect ( self . login , username = username ) except Exception as e : logging . exception ( e ) flash_error ( "Verification Failed!" ) return redirect ( self . login ) | Verify the email account to which a link was sent . |
14,029 | def oauth_connect ( self , provider , action ) : valid_actions = [ "connect" , "authorized" , "test" ] _redirect = views . auth . Account . account_settings if is_authenticated ( ) else self . login if action not in valid_actions or "oauth" not in __options__ . get ( "registration_methods" ) or not __options__ . get ( "allow_registration" ) or not hasattr ( oauth , provider ) : return redirect ( _redirect ) client = getattr ( oauth , provider ) params = client . __params__ me_args = params . get ( "me" ) user_id = params . get ( "user_id" ) oauth_user_id = None oauth_name = None oauth_email = None if action == "test" : session_data = { "provider" : "ensure" , "user_id" : "1234" , "name" : "Mardix" , "email" : "mardix@email.com" , } set_oauth_session ( session_data ) return redirect ( url_for ( self . register , oauth = 1 ) ) if action == "connect" : _next = request . args . get ( 'next' ) authorized_url = url_for ( self , provider = provider , action = "authorized" , next = _next or request . referrer or None , _external = True ) return client . authorize ( callback = authorized_url ) elif action == "authorized" : resp = client . authorized_response ( ) if resp is None : pass elif isinstance ( resp , OAuthException ) : flash_error ( "Access Denied" ) else : if not me_args : oauth_user_id = resp . get ( user_id ) else : me = client . get ( me_args ) if action == "authorized" and oauth_user_id : if is_authenticated ( ) : try : current_user . add_federated_login ( provider = provider , federated_id = oauth_user_id ) flash_success ( "You can now login with your %s account" % provider . upper ( ) ) except Exception as e : logging . exception ( e ) return redirect ( views . auth . Account . account_settings ) else : user = with_federation ( provider , oauth_user_id ) if user : create_session ( user ) return redirect ( request . args . get ( "next" ) or __options__ . get ( "login_view" ) ) else : session_data = { "provider" : provider , "user_id" : oauth_user_id , "name" : oauth_name , "email" : oauth_email , } set_oauth_session ( session_data ) else : return redirect ( _redirect ) return { "action" : action , "provider" : provider , "authorized_url" : "" } return redirect ( _redirect ) | This endpoint doesn t check if user is logged in because it has two functions |
14,030 | def log ( self , message , level = None ) : if level is None : level = logging . INFO current_app . logger . log ( msg = message , level = level ) | Write a message to log |
14,031 | def is_instance ( self , model ) : result = isinstance ( model , self . __model__ ) if result is True : return True err = 'Object {} is not of type {}' raise ValueError ( err . format ( model , self . __model__ ) ) | Is instance? Checks if the provided object is an instance of this service's model . |
14,032 | def create ( self , ** kwargs ) : model = self . new ( ** kwargs ) return self . save ( model ) | Create Instantiates and persists new model populated from provided arguments |
14,033 | def save ( self , model , commit = True ) : self . is_instance ( model ) db . session . add ( model ) if commit : db . session . commit ( ) return model | Save Puts model into unit of work for persistence . Can optionally commit transaction . Returns persisted model as a result . |
14,034 | def delete ( self , model , commit = True ) : self . is_instance ( model ) db . session . delete ( model ) if commit : db . session . commit ( ) return model | Delete Puts model for deletion into unit of work and optionally commits the transaction |
14,035 | def is_connectable ( host : str , port : Union [ int , str ] ) -> bool : socket_ = None try : socket_ = socket . create_connection ( ( host , port ) , 1 ) result = True except socket . timeout : result = False finally : if socket_ : socket_ . close ( ) return result | Tries to connect to the device to see if it is connectable . |
14,036 | def group_lines ( lines ) : groups = [ ] group = [ ] for line in lines : if line . strip ( ) == "" : groups . append ( group [ : ] ) group = [ ] continue group . append ( line ) if group : groups . append ( group [ : ] ) return groups | Split a list of lines using empty lines as separators . |
14,037 | async def set_neighbors ( self ) : t = time . time ( ) self . logger . debug ( "Settings grid neighbors for the multi-environments." ) tasks = [ ] for i in range ( len ( self . grid ) ) : for j in range ( len ( self . grid [ 0 ] ) ) : addr = self . grid [ i ] [ j ] N , E , S , W = None , None , None , None if i != 0 : W = self . grid [ i - 1 ] [ j ] if i != len ( self . grid ) - 1 : E = self . grid [ i + 1 ] [ j ] if j != 0 : N = self . grid [ i ] [ j - 1 ] if j != len ( self . grid [ 0 ] ) - 1 : S = self . grid [ i ] [ j + 1 ] task = asyncio . ensure_future ( self . _set_node_neighbors ( addr , N , E , S , W ) ) tasks . append ( task ) await asyncio . gather ( * tasks ) self . logger . debug ( "Setting grid neighbors for the slave environments " "and their agents." ) tasks = [ ] for addr in self . addrs : task = asyncio . ensure_future ( self . _set_neighbors ( addr ) ) tasks . append ( task ) await asyncio . gather ( * tasks ) self . logger . debug ( "All grid neighbors set in {} seconds." . format ( time . time ( ) - t ) ) x = self . _ngs [ 0 ] * self . _gs [ 0 ] * self . _n_slaves y = self . _ngs [ 1 ] * self . _gs [ 1 ] self . logger . info ( "Initialized a distributed grid with overall size " "({}, {}). Total of {} agents." . format ( x , y , x * y ) ) | Set neighbors for multi - environments their slave environments and agents . |
14,038 | async def ssh_exec ( server , cmd , timeout = 10 , ** ssh_kwargs ) : conn = await asyncio . wait_for ( asyncssh . connect ( server , ** ssh_kwargs ) , timeout = timeout ) ret = await conn . run ( cmd ) conn . close ( ) return ret | Execute a command on a given server using asynchronous SSH - connection . |
14,039 | async def spawn_slaves ( self , spawn_cmd , ports = None , ** ssh_kwargs ) : pool = multiprocessing . Pool ( len ( self . nodes ) ) rets = [ ] for i , node in enumerate ( self . nodes ) : server , server_port = node port = ports [ node ] if ports is not None else self . port mgr_addr = "tcp://{}:{}/0" . format ( server , port ) self . _manager_addrs . append ( mgr_addr ) if type ( spawn_cmd ) in [ list , tuple ] : cmd = spawn_cmd [ i ] else : cmd = spawn_cmd args = [ server , cmd ] ssh_kwargs_cp = ssh_kwargs . copy ( ) ssh_kwargs_cp [ 'port' ] = server_port ret = pool . apply_async ( ssh_exec_in_new_loop , args = args , kwds = ssh_kwargs_cp , error_callback = logger . warning ) rets . append ( ret ) self . _pool = pool self . _r = rets | Spawn multi - environments on the nodes through SSH - connections . |
14,040 | def get_slave_managers ( self , as_coro = False ) : async def slave_task ( addr ) : r_manager = await self . env . connect ( addr ) return await r_manager . get_slave_managers ( ) tasks = create_tasks ( slave_task , self . addrs ) return run_or_coro ( tasks , as_coro ) | Return all slave environment manager addresses . |
14,041 | def nodes ( self , type = None , failed = False ) : if type is None : type = Node if not issubclass ( type , Node ) : raise ( TypeError ( "{} is not a valid node type." . format ( type ) ) ) if failed not in [ "all" , False , True ] : raise ValueError ( "{} is not a valid node failed" . format ( failed ) ) if failed == "all" : return type . query . filter_by ( participant_id = self . id ) . all ( ) else : return type . query . filter_by ( failed = failed , participant_id = self . id ) . all ( ) | Get nodes associated with this participant . |
14,042 | def print_verbose ( self ) : print "Nodes: " for a in ( self . nodes ( failed = "all" ) ) : print a print "\nVectors: " for v in ( self . vectors ( failed = "all" ) ) : print v print "\nInfos: " for i in ( self . infos ( failed = "all" ) ) : print i print "\nTransmissions: " for t in ( self . transmissions ( failed = "all" ) ) : print t print "\nTransformations: " for t in ( self . transformations ( failed = "all" ) ) : print t | Print a verbose representation of a network . |
14,043 | def vectors ( self , direction = "all" , failed = False ) : if direction not in [ "all" , "incoming" , "outgoing" ] : raise ValueError ( "{} is not a valid vector direction. " "Must be all, incoming or outgoing." . format ( direction ) ) if failed not in [ "all" , False , True ] : raise ValueError ( "{} is not a valid vector failed" . format ( failed ) ) if failed == "all" : if direction == "all" : return Vector . query . filter ( or_ ( Vector . destination_id == self . id , Vector . origin_id == self . id ) ) . all ( ) if direction == "incoming" : return Vector . query . filter_by ( destination_id = self . id ) . all ( ) if direction == "outgoing" : return Vector . query . filter_by ( origin_id = self . id ) . all ( ) else : if direction == "all" : return Vector . query . filter ( and_ ( Vector . failed == failed , or_ ( Vector . destination_id == self . id , Vector . origin_id == self . id ) ) ) . all ( ) if direction == "incoming" : return Vector . query . filter_by ( destination_id = self . id , failed = failed ) . all ( ) if direction == "outgoing" : return Vector . query . filter_by ( origin_id = self . id , failed = failed ) . all ( ) | Get vectors that connect at this node . |
14,044 | def transmissions ( self , direction = "outgoing" , status = "all" , failed = False ) : if direction not in [ "incoming" , "outgoing" , "all" ] : raise ( ValueError ( "You cannot get transmissions of direction {}." . format ( direction ) + "Type can only be incoming, outgoing or all." ) ) if status not in [ "all" , "pending" , "received" ] : raise ( ValueError ( "You cannot get transmission of status {}." . format ( status ) + "Status can only be pending, received or all" ) ) if failed not in [ "all" , False , True ] : raise ValueError ( "{} is not a valid transmission failed" . format ( failed ) ) if direction == "all" : if status == "all" : return Transmission . query . filter ( and_ ( Transmission . failed == False , or_ ( Transmission . destination_id == self . id , Transmission . origin_id == self . id ) ) ) . all ( ) else : return Transmission . query . filter ( and_ ( Transmission . failed == False , Transmission . status == status , or_ ( Transmission . destination_id == self . id , Transmission . origin_id == self . id ) ) ) . all ( ) if direction == "incoming" : if status == "all" : return Transmission . query . filter_by ( failed = False , destination_id = self . id ) . all ( ) else : return Transmission . query . filter ( and_ ( Transmission . failed == False , Transmission . destination_id == self . id , Transmission . status == status ) ) . all ( ) if direction == "outgoing" : if status == "all" : return Transmission . query . filter_by ( failed = False , origin_id = self . id ) . all ( ) else : return Transmission . query . filter ( and_ ( Transmission . failed == False , Transmission . origin_id == self . id , Transmission . status == status ) ) . all ( ) | Get transmissions sent to or from this node . |
14,045 | def receive ( self , what = None ) : if self . failed : raise ValueError ( "{} cannot receive as it has failed." . format ( self ) ) received_transmissions = [ ] if what is None : pending_transmissions = self . transmissions ( direction = "incoming" , status = "pending" ) for transmission in pending_transmissions : transmission . status = "received" transmission . receive_time = timenow ( ) received_transmissions . append ( transmission ) elif isinstance ( what , Transmission ) : if what in self . transmissions ( direction = "incoming" , status = "pending" ) : what . status = "received" what . receive_time = timenow ( ) received_transmissions . append ( what ) else : raise ( ValueError ( "{} cannot receive {} as it is not " "in its pending_transmissions" . format ( self , what ) ) ) else : raise ValueError ( "Nodes cannot receive {}" . format ( what ) ) self . update ( [ t . info for t in received_transmissions ] ) | Receive some transmissions . |
14,046 | def replicate ( self , info_in ) : if self . failed : raise ValueError ( "{} cannot replicate as it has failed." . format ( self ) ) from transformations import Replication info_out = type ( info_in ) ( origin = self , contents = info_in . contents ) Replication ( info_in = info_in , info_out = info_out ) | Replicate an info . |
14,047 | def mutate ( self , info_in ) : if self . failed : raise ValueError ( "{} cannot mutate as it has failed." . format ( self ) ) from transformations import Mutation info_out = type ( info_in ) ( origin = self , contents = info_in . _mutated_contents ( ) ) Mutation ( info_in = info_in , info_out = info_out ) | Replicate an info + mutation . |
14,048 | def transmissions ( self , status = "all" ) : if status not in [ "all" , "pending" , "received" ] : raise ( ValueError ( "You cannot get {} transmissions." . format ( status ) + "Status can only be pending, received or all" ) ) if status == "all" : return Transmission . query . filter_by ( vector_id = self . id , failed = False ) . all ( ) else : return Transmission . query . filter_by ( vector_id = self . id , status = status , failed = False ) . all ( ) | Get transmissions sent along this Vector . |
14,049 | def serve_forever ( self , banner = None ) : if hasattr ( readline , "read_history_file" ) : try : readline . read_history_file ( self . histfile ) except IOError : pass atexit . register ( self . _save_history ) super ( Shell , self ) . serve_forever ( banner ) | Interact with the user . |
14,050 | def complete ( self , word , state ) : try : import rl rl . completion . suppress_append = True except ImportError : pass word = transform ( word , self . transforms , word = True ) if state == 0 : self . matches = self . get_matches ( word ) try : match = self . matches [ state ] except IndexError : return None else : return transform ( match , self . transforms , word = True , inverse = True ) | Return the next possible completion for word . |
14,051 | def exclude_matches ( self , matches ) : for match in matches : for exclude_pattern in self . exclude_patterns : if re . match ( exclude_pattern , match ) is not None : break else : yield match | Filter any matches that match an exclude pattern . |
14,052 | def gen_filename_completions ( self , word , filenames ) : if not word : return filenames else : trie = pygtrie . CharTrie ( ) for filename in filenames : trie [ filename ] = filename return trie . iterkeys ( prefix = word ) | Generate a sequence of filenames that match word . |
14,053 | def gen_matches ( self , word ) : if word . startswith ( "$" ) : for match in self . gen_variable_completions ( word , os . environ ) : yield match else : head , tail = os . path . split ( word ) filenames = os . listdir ( head or '.' ) completions = self . gen_filename_completions ( tail , filenames ) for match in completions : yield os . path . join ( head , match ) for extension in self . extensions : for match in extension ( word ) : yield match | Generate a sequence of possible completions for word . |
14,054 | def gen_variable_completions ( self , word , env ) : var = word [ 1 : ] for k in env : if k . startswith ( var ) : yield "$" + k | Generate a sequence of possible variable completions for word . |
14,055 | def inflect ( self , filename ) : suffix = ( "/" if os . path . isdir ( filename ) else " " ) return self . _escape ( filename ) + suffix | Inflect a filename to indicate its type . |
14,056 | def state ( self , time = None ) : if time is None : return max ( self . infos ( type = State ) , key = attrgetter ( 'creation_time' ) ) else : states = [ s for s in self . infos ( type = State ) if s . creation_time < time ] return max ( states , key = attrgetter ( 'creation_time' ) ) | The most recently created info of type State at the specified time . |
14,057 | def sentry_feature ( app ) : sentry_public_key = app . config . get ( 'SENTRY_PUBLIC_KEY' ) sentry_project_id = app . config . get ( 'SENTRY_PROJECT_ID' ) if not sentry_public_key or not sentry_project_id : return dsn = 'https://{key}@sentry.io/{project_id}' dsn = dsn . format ( key = sentry_public_key , project_id = sentry_project_id ) sentry . init_app ( app = app , dsn = dsn ) | Sentry feature Adds basic integration with Sentry via the raven library |
14,058 | def new_plugin ( self , config , * args , ** kwargs ) : typ = None obj = None if 'type' in config : typ = config [ 'type' ] elif isinstance ( config , collections . Mapping ) and len ( config ) == 1 : ( typ , config ) = list ( config . items ( ) ) [ 0 ] obj = self . _ctor ( typ , config , * args , ** kwargs ) if 'name' in config : self . _instance [ config [ 'name' ] ] = obj else : config [ 'name' ] = typ return obj | Instantiate a plugin : creates the object and stores it in _instance . |
14,059 | def to_practice_counts ( request ) : data = None if request . method == "POST" : data = json . loads ( request . body . decode ( "utf-8" ) ) [ "filters" ] if "filters" in request . GET : data = load_query_json ( request . GET , "filters" ) if data is None or len ( data ) == 0 : return render_json ( request , { } , template = 'models_json.html' , help_text = to_practice_counts . __doc__ ) language = get_language ( request ) timer ( 'to_practice_counts' ) filter_names , filter_filters = list ( zip ( * sorted ( data . items ( ) ) ) ) reachable_leaves = Item . objects . filter_all_reachable_leaves_many ( filter_filters , language ) response = { group_id : { 'filter' : data [ group_id ] , 'number_of_items' : len ( items ) , } for group_id , items in zip ( filter_names , reachable_leaves ) } LOGGER . debug ( "to_practice_counts - getting items in groups took %s seconds" , ( timer ( 'to_practice_counts' ) ) ) return render_json ( request , response , template = 'models_json.html' , help_text = to_practice_counts . __doc__ ) | Get number of items available to practice . |
14,060 | def answer ( request ) : if request . method == 'GET' : return render ( request , 'models_answer.html' , { } , help_text = answer . __doc__ ) elif request . method == 'POST' : practice_filter = get_filter ( request ) practice_context = PracticeContext . objects . from_content ( practice_filter ) saved_answers = _save_answers ( request , practice_context , True ) return render_json ( request , saved_answers , status = 200 , template = 'models_answer.html' ) else : return HttpResponseBadRequest ( "method {} is not allowed" . format ( request . method ) ) | Save the answer . |
14,061 | def user_stats ( request ) : timer ( 'user_stats' ) response = { } data = None if request . method == "POST" : data = json . loads ( request . body . decode ( "utf-8" ) ) [ "filters" ] if "filters" in request . GET : data = load_query_json ( request . GET , "filters" ) if data is None : return render_json ( request , { } , template = 'models_user_stats.html' , help_text = user_stats . __doc__ ) environment = get_environment ( ) if is_time_overridden ( request ) : environment . shift_time ( get_time ( request ) ) user_id = get_user_id ( request ) language = get_language ( request ) filter_names , filter_filters = list ( zip ( * sorted ( data . items ( ) ) ) ) reachable_leaves = Item . objects . filter_all_reachable_leaves_many ( filter_filters , language ) all_leaves = sorted ( list ( set ( flatten ( reachable_leaves ) ) ) ) answers = environment . number_of_answers_more_items ( all_leaves , user_id ) correct_answers = environment . number_of_correct_answers_more_items ( all_leaves , user_id ) if request . GET . get ( "mastered" ) : timer ( 'user_stats_mastered' ) mastery_threshold = get_mastery_trashold ( ) predictions = Item . objects . predict_for_overview ( environment , user_id , all_leaves ) mastered = dict ( list ( zip ( all_leaves , [ p >= mastery_threshold for p in predictions ] ) ) ) LOGGER . debug ( "user_stats - getting predictions for items took %s seconds" , ( timer ( 'user_stats_mastered' ) ) ) for identifier , items in zip ( filter_names , reachable_leaves ) : if len ( items ) == 0 : response [ identifier ] = { "filter" : data [ identifier ] , "number_of_items" : 0 , } else : response [ identifier ] = { "filter" : data [ identifier ] , "number_of_items" : len ( items ) , "number_of_practiced_items" : sum ( answers [ i ] > 0 for i in items ) , "number_of_answers" : sum ( answers [ i ] for i in items ) , "number_of_correct_answers" : sum ( correct_answers [ i ] for i in items ) , } if request . GET . get ( "mastered" ) : response [ identifier ] [ "number_of_mastered_items" ] = sum ( mastered [ i ] for i in items ) return render_json ( request , response , template = 'models_user_stats.html' , help_text = user_stats . __doc__ ) | Get user statistics for selected groups of items |
14,062 | def add ( self , start , end , cut_point = None , skip_rate = None , extend_loop = None ) : self . data . append ( self . parse_loop_line ( [ 'LOOP' , start , end , cut_point , skip_rate , extend_loop ] ) ) assert ( start <= end ) | Add a new loop definition . |
14,063 | def log ( self , text , key = "?????" , force = False ) : if force or self . verbose : print ">>>> {} {}" . format ( key , text ) sys . stdout . flush ( ) | Print a string to the logs . |
14,064 | def input_yes_no ( msg = '' ) : print '\n' + msg while ( True ) : i = raw_input ( 'Input yes or no: ' ) i = i . lower ( ) if i == 'y' or i == 'yes' : return True elif i == 'n' or i == 'no' : return False else : print 'ERROR: Bad input. Must enter y/n/yes/no' | Simple helper function |
14,065 | def resolve_relation_type_config ( value ) : relation_types = current_app . config [ 'PIDRELATIONS_RELATION_TYPES' ] if isinstance ( value , six . string_types ) : try : obj = next ( rt for rt in relation_types if rt . name == value ) except StopIteration : raise ValueError ( "Relation name '{0}' is not configured." . format ( value ) ) elif isinstance ( value , int ) : try : obj = next ( rt for rt in relation_types if rt . id == value ) except StopIteration : raise ValueError ( "Relation ID {0} is not configured." . format ( value ) ) else : raise ValueError ( "Type of value '{0}' is not supported for resolving." . format ( value ) ) api_class = obj_or_import_string ( obj . api ) schema_class = obj_or_import_string ( obj . schema ) return obj . __class__ ( obj . id , obj . name , obj . label , api_class , schema_class ) | Resolve the relation type to config object . |
14,066 | def match_RCSB_pdb_chains ( pdb_id1 , pdb_id2 , cut_off = 60.0 , allow_multiple_matches = False , multiple_match_error_margin = 3.0 , use_seqres_sequences_if_possible = True , strict = True ) : try : stage = pdb_id1 pdb_1 = PDB ( retrieve_pdb ( pdb_id1 ) , strict = strict ) stage = pdb_id2 pdb_2 = PDB ( retrieve_pdb ( pdb_id2 ) , strict = strict ) except ( PDBParsingException , NonCanonicalResidueException , PDBValidationException ) , e : raise PDBParsingException ( "An error occurred while loading %s: '%s'" % ( stage , str ( e ) ) ) return match_pdb_chains ( pdb_1 , pdb_id1 , pdb_2 , pdb_id2 , cut_off = cut_off , allow_multiple_matches = allow_multiple_matches , multiple_match_error_margin = multiple_match_error_margin , use_seqres_sequences_if_possible = use_seqres_sequences_if_possible ) | A convenience function for match_pdb_chains . The required arguments are two PDB IDs from the RCSB . |
14,067 | def create_resource ( output_model , rtype , unique , links , existing_ids = None , id_helper = None ) : if isinstance ( id_helper , str ) : idg = idgen ( id_helper ) elif isinstance ( id_helper , GeneratorType ) : idg = id_helper elif id_helper is None : idg = default_idgen ( None ) else : raise ValueError ( 'id_helper must be string (URL), callable or None' ) ctx = context ( None , None , output_model , base = None , idgen = idg , existing_ids = existing_ids , extras = None ) rid = I ( materialize_entity ( ctx , rtype , unique = unique ) ) if existing_ids is not None : if rid in existing_ids : return ( False , rid ) existing_ids . add ( rid ) output_model . add ( rid , VTYPE_REL , rtype ) for r , t in links : output_model . add ( rid , r , t ) return ( True , rid ) | General - purpose routine to create a new resource in the output model based on data provided |
14,068 | def _read_apps ( self ) : apps = { } for cfgfile in glob . iglob ( os . path . join ( self . confdir , '*.conf' ) ) : name = os . path . basename ( cfgfile ) [ 0 : - 5 ] try : app = AppLogParser ( name , cfgfile , self . args , self . logdir , self . fields , self . name_cache , self . report ) except ( LogRaptorOptionError , LogRaptorConfigError , LogFormatError ) as err : logger . error ( 'cannot add app %r: %s' , name , err ) else : apps [ name ] = app if not apps : raise LogRaptorConfigError ( 'no configured application in %r!' % self . confdir ) return apps | Read the configuration of applications returning a dictionary |
14,069 | def patterns ( self ) : if not self . args . patterns and not self . args . pattern_files : try : self . args . patterns . append ( self . args . files . pop ( 0 ) ) except IndexError : raise LogRaptorArgumentError ( 'PATTERN' , 'no search pattern' ) patterns = set ( ) if self . args . pattern_files : patterns . update ( [ p . rstrip ( '\n' ) for p in fileinput . input ( self . args . pattern_files ) ] ) patterns . update ( self . args . patterns ) logger . debug ( "search patterns to be processed: %r" , patterns ) if '' in patterns : logger . info ( "an empty pattern provided: match all strings!" ) return tuple ( ) try : flags = re . IGNORECASE if self . args . case else 0 | re . UNICODE return tuple ( [ re . compile ( r'(\b%s\b)' % pat if self . args . word else '(%s)' % pat , flags = flags ) for pat in patterns if pat ] ) except re . error as err : raise LogRaptorArgumentError ( 'wrong regex syntax for pattern: %r' % err ) | A tuple with re . RegexObject objects created from regex pattern arguments . |
14,070 | def files ( self ) : if not self . args . files and self . recursive : return [ '.' ] else : return self . args . files | A list of input sources . Each item can be a file path a glob path or URL . |
14,071 | def apps ( self ) : logger . debug ( "initialize applications ..." ) enabled = None apps = self . args . apps or self . _config_apps . keys ( ) unknown = set ( apps ) - set ( self . _config_apps . keys ( ) ) if unknown : raise LogRaptorArgumentError ( "--apps" , "not found apps %r" % list ( unknown ) ) if apps or enabled is None : return { k : v for k , v in self . _config_apps . items ( ) if k in apps } else : return { k : v for k , v in self . _config_apps . items ( ) if k in apps and v . enabled == enabled } | Dictionary with loaded applications . |
14,072 | def apptags ( self ) : logger . debug ( "populate tags map ..." ) apps = self . _apps . keys ( ) unknown = set ( apps ) unknown . difference_update ( self . _config_apps . keys ( ) ) if unknown : raise ValueError ( "unknown apps: %r" % list ( unknown ) ) apps = [ v for v in self . _config_apps . values ( ) if v . name in apps ] tagmap = { } for app in sorted ( apps , key = lambda x : ( x . priority , x . name ) ) : for tag in app . tags : if not tag : raise LogRaptorConfigError ( 'found an empty tag for app %r' % app . name ) try : tagmap [ tag ] . append ( app ) except KeyError : tagmap [ tag ] = [ app ] return tagmap | Map from log app - name to an application . |
14,073 | def create_dispatcher ( self ) : before_context = max ( self . args . before_context , self . args . context ) after_context = max ( self . args . after_context , self . args . context ) if self . args . files_with_match is not None or self . args . count or self . args . only_matching or self . args . quiet : return UnbufferedDispatcher ( self . _channels ) elif before_context == 0 and after_context == 0 : return UnbufferedDispatcher ( self . _channels ) elif self . args . thread : return ThreadedDispatcher ( self . _channels , before_context , after_context ) else : return LineBufferDispatcher ( self . _channels , before_context , after_context ) | Return a dispatcher for configured channels . |
14,074 | def get_config ( self ) : channels = [ sect . rsplit ( '_' ) [ 0 ] for sect in self . config . sections ( suffix = '_channel' ) ] channels . sort ( ) disabled_apps = [ app for app in self . _config_apps . keys ( ) if app not in self . _apps ] return u'' . join ( [ u"\n--- %s configuration ---" % __package__ , u"\nConfiguration file: %s" % self . config . cfgfile , u"\nConfiguration directory: %s" % self . confdir , u"\nConfigured applications: %s" % ', ' . join ( self . _config_apps . keys ( ) ) , u"\nDisabled applications: %s" % ', ' . join ( disabled_apps ) if disabled_apps else '' , u"\nFilter fields: %s" % ', ' . join ( self . config . options ( 'fields' ) ) , u"\nOutput channels: %s" % ', ' . join ( channels ) if channels else u'No channels defined' , u"\nReports: %s\n" % ', ' . join ( [ section [ : - 7 ] for section in self . config . sections ( suffix = '_report' ) ] ) , '' ] ) | Return a formatted text with main configuration parameters . |
14,075 | def get_run_summary ( self , run_stats ) : run_stats = run_stats . copy ( ) run_stats [ 'files' ] = len ( run_stats [ 'files' ] ) summary = [ u'\n--- %s run summary ---' % __package__ , u'Number of processed files: %(files)d' , u'Total lines read: %(lines)d' , u'Total log events matched: %(matches)d' , ] if any ( [ app . matches or app . unparsed for app in self . apps . values ( ) ] ) : if self . matcher == 'unruled' : summary . append ( "Applications found (application rules not used):" ) for app in filter ( lambda x : x . matches , self . apps . values ( ) ) : summary . append ( u' %s(matches=%d)' % ( app . name , app . matches ) ) else : summary . append ( "Applications found:" ) for app in filter ( lambda x : x . matches or x . unparsed , self . apps . values ( ) ) : summary . append ( u' %s(matches=%d, unparsed=%s)' % ( app . name , app . matches , app . unparsed ) ) summary . append ( '\n' ) return '\n' . join ( summary ) % run_stats | Produce a text summary from run statistics . |
14,076 | def add_template_dirs ( app ) : template_dir = os . path . join ( os . path . dirname ( os . path . abspath ( __file__ ) ) , 'templates' ) app . jinja_loader = jinja2 . ChoiceLoader ( [ app . jinja_loader , jinja2 . FileSystemLoader ( template_dir ) ] ) | Add flask_mongo_profiler's template directories .
14,077 | def setup ( ) : config_name = ".wallaceconfig" config_path = os . path . join ( os . path . expanduser ( "~" ) , config_name ) if os . path . isfile ( config_path ) : log ( "Wallace config file already exists." , chevrons = False ) else : log ( "Creating Wallace config file at ~/.wallaceconfig..." , chevrons = False ) wallace_module_path = os . path . dirname ( os . path . realpath ( __file__ ) ) src = os . path . join ( wallace_module_path , "config" , config_name ) shutil . copyfile ( src , config_path ) | Walk the user through the Wallace setup .
14,078 | def summary ( app ) : r = requests . get ( 'https://{}.herokuapp.com/summary' . format ( app ) ) summary = r . json ( ) [ 'summary' ] click . echo ( "\nstatus \t| count" ) click . echo ( "----------------" ) for s in summary : click . echo ( "{}\t| {}" . format ( s [ 0 ] , s [ 1 ] ) ) num_101s = sum ( [ s [ 1 ] for s in summary if s [ 0 ] == 101 ] ) num_10xs = sum ( [ s [ 1 ] for s in summary if s [ 0 ] >= 100 ] ) if num_10xs > 0 : click . echo ( "\nYield: {:.2%}" . format ( 1.0 * num_101s / num_10xs ) ) | Print a summary of a deployed app's status .
14,079 | def scale_up_dynos ( id ) : config = PsiturkConfig ( ) config . load_config ( ) dyno_type = config . get ( 'Server Parameters' , 'dyno_type' ) num_dynos_web = config . get ( 'Server Parameters' , 'num_dynos_web' ) num_dynos_worker = config . get ( 'Server Parameters' , 'num_dynos_worker' ) log ( "Scaling up the dynos..." ) subprocess . call ( "heroku ps:scale web=" + str ( num_dynos_web ) + ":" + str ( dyno_type ) + " --app " + id , shell = True ) subprocess . call ( "heroku ps:scale worker=" + str ( num_dynos_worker ) + ":" + str ( dyno_type ) + " --app " + id , shell = True ) clock_on = config . getboolean ( 'Server Parameters' , 'clock_on' ) if clock_on : subprocess . call ( "heroku ps:scale clock=1:" + dyno_type + " --app " + id , shell = True ) | Scale up the Heroku dynos . |
14,080 | def deploy ( verbose , app ) : config = PsiturkConfig ( ) config . load_config ( ) config . set ( "Experiment Configuration" , "mode" , "deploy" ) config . set ( "Server Parameters" , "logfile" , "-" ) config . set ( "Shell Parameters" , "launch_in_sandbox_mode" , "false" ) deploy_sandbox_shared_setup ( verbose = verbose , app = app ) | Deploy app using Heroku to MTurk . |
14,081 | def qualify ( qualification , value , worker ) : from boto . mturk . connection import MTurkConnection config = PsiturkConfig ( ) config . load_config ( ) aws_access_key_id = config . get ( 'AWS Access' , 'aws_access_key_id' ) aws_secret_access_key = config . get ( 'AWS Access' , 'aws_secret_access_key' ) conn = MTurkConnection ( aws_access_key_id , aws_secret_access_key ) def get_workers_with_qualification ( qualification ) : results = [ ] continue_flag = True page = 1 while ( continue_flag ) : new_results = conn . get_qualifications_for_qualification_type ( qualification , page_size = 100 , page_number = page ) if ( len ( new_results ) == 0 ) : continue_flag = False else : results . extend ( new_results ) page = page + 1 return results results = get_workers_with_qualification ( qualification ) workers = [ x . SubjectId for x in results ] click . echo ( "Assigning qualification {} with value {} to worker {}" . format ( qualification , value , worker ) ) if worker in workers : result = conn . update_qualification_score ( qualification , worker , value ) else : result = conn . assign_qualification ( qualification , worker , value ) if result : click . echo ( result ) results = get_workers_with_qualification ( qualification ) click . echo ( "{} workers with qualification {}:" . format ( len ( results ) , qualification ) ) values = [ r . IntegerValue for r in results ] unique_values = list ( set ( [ r . IntegerValue for r in results ] ) ) for v in unique_values : click . echo ( "{} with value {}" . format ( len ( [ val for val in values if val == v ] ) , v ) ) | Assign a qualification to a worker . |
14,082 | def dump_database ( id ) : log ( "Generating a backup of the database on Heroku..." ) dump_filename = "data.dump" data_directory = "data" dump_dir = os . path . join ( data_directory , id ) if not os . path . exists ( dump_dir ) : os . makedirs ( dump_dir ) subprocess . call ( "heroku pg:backups capture --app " + id , shell = True ) backup_url = subprocess . check_output ( "heroku pg:backups public-url --app " + id , shell = True ) backup_url = backup_url . replace ( '"' , '' ) . rstrip ( ) backup_url = re . search ( "https:.*" , backup_url ) . group ( 0 ) print ( backup_url ) log ( "Downloading the backup..." ) dump_path = os . path . join ( dump_dir , dump_filename ) with open ( dump_path , 'wb' ) as file : subprocess . call ( [ 'curl' , '-o' , dump_path , backup_url ] , stdout = file ) return dump_path | Back up the Postgres database locally .
14,083 | def backup ( app ) : dump_path = dump_database ( app ) config = PsiturkConfig ( ) config . load_config ( ) conn = boto . connect_s3 ( config . get ( 'AWS Access' , 'aws_access_key_id' ) , config . get ( 'AWS Access' , 'aws_secret_access_key' ) , ) bucket = conn . create_bucket ( app , location = boto . s3 . connection . Location . DEFAULT ) k = boto . s3 . key . Key ( bucket ) k . key = 'database.dump' k . set_contents_from_filename ( dump_path ) url = k . generate_url ( expires_in = 0 , query_auth = False ) log ( "The database backup URL is..." ) print ( url ) | Dump the database . |
14,084 | def create ( example ) : try : this_dir = os . path . dirname ( os . path . realpath ( __file__ ) ) example_dir = os . path . join ( this_dir , os . pardir , "examples" , example ) shutil . copytree ( example_dir , os . path . join ( os . getcwd ( ) , example ) ) log ( "Example created." , delay = 0 ) except TypeError : click . echo ( "Example '{}' does not exist." . format ( example ) ) except OSError : click . echo ( "Example '{}' already exists here." . format ( example ) ) | Create a copy of the given example . |
14,085 | def get_datetime_interval ( timestamp , diff , offset = 0 ) : fin_datetime = datetime . datetime . fromtimestamp ( timestamp + offset ) ini_datetime = datetime . datetime . fromtimestamp ( timestamp - diff ) return ini_datetime , fin_datetime | Return a datetime interval extending backward from the timestamp , computed using the difference in seconds passed as an argument . The final datetime is corrected with an optional offset .
14,086 | def strftimegen ( start_dt , end_dt ) : if start_dt > end_dt : raise ValueError ( "the start datetime is after the end datetime: (%r,%r)" % ( start_dt , end_dt ) ) def iterftime ( string ) : date_subs = [ i for i in DATE_FORMATS if i [ 1 ] . search ( string ) is not None ] if not date_subs : yield string else : dt = start_dt while end_dt >= dt : date_path = string for item in date_subs : date_path = item [ 1 ] . sub ( dt . strftime ( item [ 0 ] ) , date_path ) yield date_path dt = dt + datetime . timedelta ( days = 1 ) return iterftime | Return a generator function for datetime format strings . The generator produces a day - by - day sequence from the first datetime to the second datetime argument .
14,087 | def setup_jobs ( outpath , options , input_files ) : job_inputs = None reverse_mapping = None fasta_file_contents = { } for input_file in input_files : assert ( not ( fasta_file_contents . get ( input_file ) ) ) if any ( fnmatch ( input_file , x ) for x in pdb_file_wildcards ) : pdb = PDB . from_filepath ( input_file , strict = True ) pdb . pdb_id = os . path . basename ( input_file ) . split ( '.' ) [ 0 ] if pdb . pdb_id . startswith ( 'pdb' ) and len ( pdb . pdb_id ) >= 7 : pdb . pdb_id = pdb . pdb_id . replace ( 'pdb' , '' ) fasta_file_contents [ input_file ] = ( pdb . create_fasta ( prefer_seqres_order = False ) , 'PDB' ) else : fasta_file_contents [ input_file ] = ( read_file ( input_file ) , 'FASTA' ) found_sequences , reverse_mapping , errors = get_sequences ( options , fasta_file_contents ) if found_sequences : reformat ( found_sequences ) if errors : return None , False , errors desired_sequences = { } for key , sequence in found_sequences . iteritems ( ) : pdb_id , chain , file_name = key if options . chain is None or chain == options . chain : desired_sequences [ key ] = sequence job_inputs , errors = create_inputs ( options , outpath , desired_sequences ) if reverse_mapping : segment_mapping_file = os . path . join ( outpath , "segment_map.json" ) colorprinter . message ( "Creating a reverse mapping file %s." % segment_mapping_file ) write_file ( segment_mapping_file , json . dumps ( reverse_mapping ) ) post_processing_script = read_file ( os . path . join ( os . path . split ( os . path . realpath ( __file__ ) ) [ 0 ] , 'post_processing.py' ) ) write_file ( os . path . join ( outpath , 'post_processing.py' ) , post_processing_script , 'w' ) if options . secondary_structure_file : write_file ( os . path . join ( outpath , 'ss_filter.json' ) , json . dumps ( { 'secondary_structure_filter' : SecondaryStructureDefinition . from_filepath ( options . secondary_structure_file ) . data } ) , 'w' ) return job_inputs , reverse_mapping != None , errors | This function sets up the jobs by creating the necessary input files as expected . - outpath is where the output is to be stored . - options is the optparse options object . - input_files is a list of paths to input files . |
14,088 | def reformat ( found_sequences ) : for ( pdb_id , chain , file_name ) , sequence in sorted ( found_sequences . iteritems ( ) ) : header = sequence [ 0 ] assert ( header [ 0 ] == '>' ) tokens = header . split ( '|' ) tokens [ 0 ] = tokens [ 0 ] [ : 5 ] assert ( len ( tokens [ 0 ] ) == 5 ) sequence [ 0 ] = "|" . join ( tokens ) | Truncate the FASTA headers so that the first field is a 4 - character ID . |
14,089 | def search_configuration_files ( findstr , replacestr = None ) : F = open ( configurationFilesLocation , "r" ) lines = F . readlines ( ) F . close ( ) allerrors = { } alloutput = { } for line in lines : line = line . strip ( ) if line : if line . endswith ( "generate_fragments.py" ) : if not ( os . path . exists ( line ) ) : allerrors [ line ] = "File/directory %s does not exist." % line else : cmd = [ "grep" , "-n" , "-i" , findstr , line ] output = subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) . communicate ( ) errors = output [ 1 ] output = output [ 0 ] if errors : errors = errors . strip ( ) allerrors [ line ] = errors if output : output = output . strip ( ) alloutput [ line ] = output . split ( "\n" ) return alloutput , allerrors | This function could be used to find and replace paths in the configuration files . At present it only finds phrases . |
14,090 | def get_local_ip_address ( target ) : ip_adr = '' try : s = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM ) s . connect ( ( target , 8000 ) ) ip_adr = s . getsockname ( ) [ 0 ] s . close ( ) except : pass return ip_adr | Get the local IP address used to reach a specific target .
14,091 | def connect ( self ) : try : self . sock . connect ( ( self . host , self . port ) ) return self . sock except socket . error as ex : logging . error ( 'Exception while connecting socket on %s:%s - Error %s' % ( self . host , self . port , ex ) ) raise except Exception as ex : logging . exception ( 'Exception while connecting socket on %s:%s - Error %s' % ( self . host , self . port , ex ) ) raise | Connect socket to server |
14,092 | def send_by_packet ( self , data ) : total_sent = 0 while total_sent < PACKET_SIZE : sent = self . sock . send ( data [ total_sent : ] ) if sent == 0 : raise RuntimeError ( "socket connection broken" ) total_sent += sent return total_sent | Send data by packet on socket |
14,093 | def receive ( self , siz ) : result = bytearray ( ) data = 'x' while len ( data ) > 0 : data = self . sock . recv ( siz - len ( result ) ) result += data if len ( result ) == siz : return result if len ( result ) > siz : raise Exception ( 'Received more bytes than expected' ) raise Exception ( 'Error receiving data. %d bytes received' % len ( result ) ) | Receive a known length of bytes from a socket |
14,094 | def spawn_container ( addr , env_cls = Environment , mgr_cls = EnvManager , set_seed = True , * args , ** kwargs ) : try : import setproctitle as spt title = 'creamas: {}({})' . format ( env_cls . __class__ . __name__ , _get_base_url ( addr ) ) spt . setproctitle ( title ) except : pass if set_seed : _set_random_seeds ( ) task = start ( addr , env_cls , mgr_cls , * args , ** kwargs ) loop = asyncio . new_event_loop ( ) asyncio . set_event_loop ( loop ) loop . run_until_complete ( task ) | Spawn a new environment in a given address as a coroutine . |
14,095 | def _set_random_seeds ( ) : try : import numpy as np np . random . seed ( ) except : pass try : import scipy as sp sp . random . seed ( ) except : pass import random random . seed ( ) | Set new random seeds for the process . |
14,096 | async def report ( self , msg , timeout = 5 ) : try : host_manager = await self . env . connect ( self . host_manager , timeout = timeout ) except : raise ConnectionError ( "Could not reach host manager ({})." . format ( self . host_manager ) ) ret = await host_manager . handle ( msg ) return ret | Report message to the host manager . |
14,097 | def get_agents ( self , addr = True , agent_cls = None , as_coro = False ) : return self . env . get_agents ( addr = addr , agent_cls = agent_cls ) | Get agents from the managed environment . |
14,098 | async def get_artifacts ( self ) : host_manager = await self . env . connect ( self . _host_manager , timeout = TIMEOUT ) artifacts = await host_manager . get_artifacts ( ) return artifacts | Get all artifacts from the host environment . |
14,099 | async def spawn ( self , agent_cls , * args , addr = None , ** kwargs ) : _ , addr = await self . menv . spawn ( agent_cls , * args , addr = addr , ** kwargs ) return addr | Spawn an agent to the environment . |
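The `strftimegen` row (14,086) above is easy to misread, so here is an illustrative usage sketch. It is not part of the dataset: the `DATE_FORMATS` table below is a hypothetical stand-in for lograptor's real module-level constant, reduced to three strftime codes.

```python
import datetime
import re

# Hypothetical stand-in for lograptor's DATE_FORMATS constant:
# pairs of a strftime code and a regex that locates it in a template.
DATE_FORMATS = [
    ('%Y', re.compile(r'%Y')),
    ('%m', re.compile(r'%m')),
    ('%d', re.compile(r'%d')),
]

def strftimegen(start_dt, end_dt):
    """Generator factory mirroring row 14,086 (date_path reset per day)."""
    if start_dt > end_dt:
        raise ValueError("the start datetime is after the end datetime: (%r,%r)"
                         % (start_dt, end_dt))

    def iterftime(string):
        date_subs = [i for i in DATE_FORMATS if i[1].search(string) is not None]
        if not date_subs:
            yield string
        else:
            dt = start_dt
            while end_dt >= dt:
                date_path = string  # start each day from the original template
                for item in date_subs:
                    date_path = item[1].sub(dt.strftime(item[0]), date_path)
                yield date_path
                dt = dt + datetime.timedelta(days=1)

    return iterftime

gen = strftimegen(datetime.datetime(2023, 1, 30), datetime.datetime(2023, 2, 1))
for path in gen('/var/log/app/%Y-%m-%d.log'):
    print(path)
# /var/log/app/2023-01-30.log
# /var/log/app/2023-01-31.log
# /var/log/app/2023-02-01.log
```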
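Rows 14,091–14,093 describe a small blocking-socket helper class. As a self-contained sketch of the exact-length receive loop from row 14,093, rewritten here as a standalone function and exercised over a local `socketpair` (so none of the original class's server setup is assumed):

```python
import socket

def recv_exact(sock, size):
    """Block until exactly `size` bytes arrive, as in row 14,093."""
    buf = bytearray()
    while len(buf) < size:
        chunk = sock.recv(size - len(buf))
        if not chunk:  # peer closed before the expected length arrived
            raise RuntimeError('connection closed after %d of %d bytes'
                               % (len(buf), size))
        buf += chunk
    return bytes(buf)

a, b = socket.socketpair()
a.sendall(b'hello world')
print(recv_exact(b, 5))   # b'hello'
print(recv_exact(b, 6))   # b' world'
```

Unlike the row's version, this sketch loops on the running total rather than on the last chunk length, which removes the unreachable over-read branch after the loop while keeping the same behavior.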