idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
13,100
def get_info(self):
    """Retrieve the unixref XML record for self.doi from CrossRef.

    Queries the CrossRef guest query endpoint and extracts the single
    <doi_records> XML block from the returned HTML.

    :returns: the matched <doi_records>...</doi_records> XML string.
    :raises DOIRetrievalException: when zero or more than one record
        matches the DOI.
    """
    # Percent-encode the DOI; safe='' so '/' inside the DOI is escaped too.
    escaped_doi = urllib2.quote(self.doi, '')
    html = get_resource("www.crossref.org", '/guestquery?queryType=doi&restype=unixref&doi=%s&doi_search=Search' % escaped_doi)
    xml_matches = []
    # DOTALL: the record spans multiple lines in the returned page.
    for m in re.finditer('(<doi_records>.*?</doi_records>)', html, re.DOTALL):
        xml_matches.append(m.group(0))
    if len(xml_matches) == 0:
        raise DOIRetrievalException('No matches found for the DOI "%s".' % self.doi)
    elif len(xml_matches) == 1:
        return xml_matches[0]
    else:
        raise DOIRetrievalException('Multiple (%d) matches found for the DOI "%s".' % (len(xml_matches), self.doi))
Retrieve the data from CrossRef .
13,101
def add_backbone_atoms_linearly_from_loop_filepaths(self, loop_json_filepath, fasta_filepath, residue_ids):
    """A utility wrapper around add_backbone_atoms_linearly.

    Reads a single-loop JSON definition and a single-sequence FASTA file,
    maps the insertion sequence onto the residues between the loop's start
    and stop residues, and delegates to add_backbone_atoms_linearly.

    :param loop_json_filepath: path to a JSON file whose 'LoopSet' has
        exactly one entry with 'start' and 'stop' residue descriptors
        (chainID, resSeq, iCode).
    :param fasta_filepath: path to a FASTA file with exactly one sequence.
    :param residue_ids: ordered residue IDs; must be the same length as
        the FASTA sequence and contain both loop endpoints.
    :raises Exception: on length mismatch, an empty or unterminated
        insertion range, or missing start/end residues in the PDB.
    """
    loop_def = json.loads(read_file(loop_json_filepath))
    assert(len(loop_def['LoopSet']) == 1)
    start_res = loop_def['LoopSet'][0]['start']
    end_res = loop_def['LoopSet'][0]['stop']
    # Convert the JSON residue descriptors into the PDB string form
    # (chain + resSeq + insertion code).
    start_res = PDB.ChainResidueID2String(start_res['chainID'], (str(start_res['resSeq']) + start_res['iCode']).strip())
    end_res = PDB.ChainResidueID2String(end_res['chainID'], (str(end_res['resSeq']) + end_res['iCode']).strip())
    assert(start_res in residue_ids)
    assert(end_res in residue_ids)
    f = FASTA(read_file(fasta_filepath), strict=False)
    assert(len(f.get_sequences()) == 1)
    insertion_sequence = f.sequences[0][2]
    if not len(residue_ids) == len(insertion_sequence):
        raise Exception('The sequence in the FASTA file must have the same length as the list of residues.')
    # Walk residue_ids and keep only the span from start_res to end_res
    # (inclusive), pairing each kept residue with its sequence character.
    kept_residues = []
    insertion_residue_map = {}
    in_section = False
    found_end = False
    for x in range(len(residue_ids)):
        residue_id = residue_ids[x]
        if residue_id == start_res:
            in_section = True
        if in_section:
            kept_residues.append(residue_id)
            insertion_residue_map[residue_id] = insertion_sequence[x]
            if residue_id == end_res:
                found_end = True
                break
    if not kept_residues:
        raise Exception('The insertion sequence is empty (check the start and end residue ids).')
    if not found_end:
        raise Exception('The end residue was not encountered when iterating over the insertion sequence (check the start and end residue ids).')
    try:
        # Residue lookup: first character is the chain ID, the remainder is
        # the residue identifier.  (Python 2 except syntax below.)
        start_res = self.residues[start_res[0]][start_res[1:]]
        end_res = self.residues[end_res[0]][end_res[1:]]
    except Exception, e:
        raise Exception('The start or end residue could not be found in the PDB file.')
    return self.add_backbone_atoms_linearly(start_res, end_res, kept_residues, insertion_residue_map)
A utility wrapper around add_backbone_atoms_linearly . Adds backbone atoms in a straight line from the first to the last residue of residue_ids .
13,102
def add_atoms_linearly(self, start_atom, end_atom, new_atoms, jitterbug=0.2):
    """Insert new backbone atoms evenly spaced on the line from start_atom
    to end_atom.

    A low-level function: the caller is responsible for ensuring the
    insertion makes sense. Atoms are perturbed slightly off the line
    (controlled by jitterbug) so they are not perfectly collinear.
    Returns the resulting PDB file contents as a single string with the
    new ATOM lines spliced in directly after start_atom's line.

    :param start_atom: atom at the start of the line (same chain as end_atom).
    :param end_atom: atom at the end of the line.
    :param new_atoms: iterable of (residue_id, residue_type, atom_name)
        triples; residue_id must be 6 chars, residue_type 3 chars, and
        atom_name one of CA/C/N/O.
    :param jitterbug: magnitude of the off-line jitter; falsy disables it.
    """
    # PDB atom-name field strings.  NOTE(review): PDB is column-sensitive;
    # confirm the internal spacing of these literals survived copy/paste.
    atom_name_map = {
        'CA': ' CA ',
        'C': ' C ',
        'N': ' N ',
        'O': ' O ',
    }
    assert(start_atom.residue.chain == end_atom.residue.chain)
    chain_id = start_atom.residue.chain
    num_new_atoms = float(len(new_atoms))
    # Divide the start->end segment into len(new_atoms)+1 equal legs.
    X, Y, Z = start_atom.x, start_atom.y, start_atom.z
    x_step = (end_atom.x - X) / (num_new_atoms + 1.0)
    y_step = (end_atom.y - Y) / (num_new_atoms + 1.0)
    z_step = (end_atom.z - Z) / (num_new_atoms + 1.0)
    D = math.sqrt(x_step * x_step + y_step * y_step + z_step * z_step)
    jitter = 0
    if jitterbug:
        jitter = (((x_step + y_step + z_step) / 3.0) * jitterbug) / D
    new_lines = []
    next_serial_number = max(sorted(self.atoms.keys())) + 1
    round = 0  # NOTE(review): local name shadows the builtin round()
    for new_atom in new_atoms:
        X, Y, Z = X + x_step, Y + y_step, Z + z_step
        # Rotate the jittered axis pair every step so consecutive atoms
        # are displaced in different directions.
        if jitter:
            if round % 3 == 0:
                X, Y = X + jitter, Y - jitter
            elif round % 3 == 1:
                Y, Z = Y + jitter, Z - jitter
            elif round % 3 == 2:
                Z, X = Z + jitter, X - jitter
        round += 1
        residue_id, residue_type, atom_name = new_atom
        assert(len(residue_type) == 3)
        assert(len(residue_id) == 6)
        new_lines.append('ATOM {0} {1} {2} {3} {4:>8.3f}{5:>8.3f}{6:>8.3f} 1.00 0.00 '.format(str(next_serial_number).rjust(5), atom_name_map[atom_name], residue_type, residue_id, X, Y, Z))
        next_serial_number += 1
    new_pdb = []
    in_start_residue = False
    # Splice the new ATOM lines in immediately after start_atom's line.
    for l in self.indexed_lines:
        if l[0] and l[3].serial_number == start_atom.serial_number:
            in_start_residue = True
        if in_start_residue and l[3].serial_number != start_atom.serial_number:
            new_pdb.extend(new_lines)
            in_start_residue = False
        if l[0]:
            new_pdb.append(l[2])
        else:
            new_pdb.append(l[1])
    return '\n'.join(new_pdb)
A low - level function which adds new_atoms between start_atom and end_atom . This function does not validate the input i . e . the calling functions are responsible for ensuring that the insertion makes sense .
13,103
def to_string(self):
    """Render this record as a single MapChart input-file line."""
    fields = (
        self.trait,
        self.start_position,
        self.peak_start_position,
        self.peak_stop_position,
        self.stop_position,
    )
    return ' '.join('%s' % field for field in fields)
Return the string as it should be presented in a MapChart input file .
13,104
def main():
    """The single entry point to the glim command line interface.

    Called from the pypi console_scripts key or by glim.py at the root.
    Pre-parses --env so the environment is known early, registers
    framework commands and (when present) app commands, builds the app
    if an app folder exists, then dispatches the matched command.
    """
    # Pre-parse only --env; the remaining args go to the full parser below.
    preparser = argparse.ArgumentParser(description=description, add_help=False)
    preparser.add_argument('--env', '-e', dest='env', default='development', help='choose application environment')
    namespace, extra = preparser.parse_known_args()
    env = namespace.env
    parser = argparse.ArgumentParser(parents=[preparser], description=description, add_help=True)
    subparsers = parser.add_subparsers(title='commands', help='commands')
    commandadapter = CommandAdapter(subparsers)
    # Framework commands are always registered; app commands only load when
    # an app package exists (pass_errors avoids failing outside an app).
    commandadapter.register(glim.commands)
    appcommands = import_module('app.commands', pass_errors=True)
    commandadapter.register(appcommands)
    app = None
    if paths.app_exists() is False:
        # NOTE(review): 'new' is computed but never used afterwards —
        # confirm whether it was meant to gate app-less commands.
        new = True if 'new' in extra else False
        if ('help' in extra) or ('--help' in extra) or ('-h' in extra):
            help = True
        else:
            help = False
        if help:
            parser.print_help()
            exit()
    else:
        app = make_app(env, commandadapter)
    args = parser.parse_args()
    command = commandadapter.match(args)
    commandadapter.dispatch(command, app)
The single entry point to glim command line interface . Main method is called from pypi console_scripts key or by glim . py on root . This function initializes a new app given the glim commands and app commands if app exists .
13,105
def make_app(env, commandadapter=None):
    """Create a Glim application for the given environment.

    :param env: environment name; configuration is loaded from
        app.config.<env>.
    :param commandadapter: optional CommandAdapter forwarded to Glim.
    :returns: a Glim instance, or None when the environment's
        configuration module cannot be found.
    """
    mconfig = import_module('app.config.%s' % env, pass_errors=True)
    # Only complain about missing config when an app actually exists.
    if mconfig is None and paths.app_exists():
        print(colored('Configuration for "%s" environment is not found' % env, 'red'))
        return None
    mstart = import_module('app.start')
    mroutes = import_module('app.routes')
    mcontrollers = import_module('app.controllers')
    before = mstart.before
    return Glim(commandadapter, mconfig, mroutes, mcontrollers, env, before)
Creates an application instance for the given environment .
13,106
def callback(self):
    """Generate the external callback URL for this provider.

    Preserves the optional 'next' query argument so the post-auth
    redirect target survives the round trip to the provider.

    :returns: absolute URL for the provider's 'handle' endpoint.
    """
    # Renamed local from 'next' to avoid shadowing the builtin next().
    next_url = request.args.get('next') or None
    endpoint = 'social.{}.handle'.format(self.provider)
    return url_for(endpoint, _external=True, next=next_url)
Generate callback url for provider
13,107
def next(self):
    """Return the post-authorization redirect target.

    Prefers an explicit ?next=... query argument; otherwise falls back
    to the configured default endpoint and parameters.
    """
    target = request.args.get('next')
    if target is None:
        target = url_for(self.default_redirect_endpoint, **self.default_redirect_params)
    return target
Where to redirect after authorization
13,108
def dispatch_request(self):
    """Handle the redirect back from the OAuth provider.

    Flow: short-circuit for already-authenticated users; exchange the
    provider response for profile data; attempt a social login; for
    unknown accounts, link the social id to an existing user matched by
    email or register a brand-new user; finally force-login and redirect.
    """
    if current_user.is_authenticated:
        return redirect(self.next)
    # Drop stale social data from a previous attempt.
    if 'social_data' in session:
        del session['social_data']
    res = self.app.authorized_response()
    if res is None:
        # User denied authorization or the provider returned nothing.
        if self.flash:
            flash(self.auth_failed_msg, 'danger')
        return redirect(self.next)
    data = self.get_profile_data(res)
    if data is None:
        if self.flash:
            flash(self.data_failed_msg, 'danger')
        return redirect(self.next)
    try:
        ok = user_service.attempt_social_login(self.provider, data['id'])
        if ok:
            if self.flash:
                flash(self.logged_in_msg.format(self.provider), 'success')
            return redirect(self.logged_in)
    except x.AccountLocked as locked:
        msg = self.lock_msg.format(locked.locked_until)
        if self.flash:
            flash(msg, 'danger')
        url = url_for(self.lock_redirect, **self.lock_redirect_params)
        return redirect(url)
    except x.EmailNotConfirmed:
        return redirect(url_for(self.unconfirmed_email_endpoint))
    # No account matched the social id: link it to an existing user found
    # by email, or register a new user with the provider id attached.
    email = data.get('email')
    provider = data.get('provider')
    id = data.get('id')  # NOTE: shadows the builtin id() in this scope
    id_column = '{}_id'.format(provider)
    user = user_service.first(email=email)
    if user:
        setattr(user, id_column, id)
        user_service.save(user)
    if not user:
        cfg = current_app.config
        send_welcome = cfg.get('USER_SEND_WELCOME_MESSAGE')
        base_confirm_url = cfg.get('USER_BASE_EMAIL_CONFIRM_URL')
        if not base_confirm_url:
            endpoint = 'user.confirm.email.request'
            base_confirm_url = url_for(endpoint, _external=True)
        data = dict(email=email)
        data[id_column] = id
        user = user_service.register(user_data=data, send_welcome=send_welcome, base_confirm_url=base_confirm_url)
    if user_service.require_confirmation and not user.email_confirmed:
        return redirect(url_for(self.ok_endpoint, **self.ok_params))
    user_service.force_login(user)
    return redirect(self.force_login_redirect)
Handle redirect back from provider
13,109
def project(v, n):
    """Project vector v onto the plane whose normal vector is n.

    Subtracts from v its component along n: v - ((v.n) / |n|^2) * n.
    """
    scale = matmul(v, n) / (norm(n) ** 2.0)
    return v - scale * n
Project Vector v onto plane with normal vector n .
13,110
def main():
    """Entry point: parse arguments, load config, and send an SMS via 46elks.

    NOTE(review): `errors` is appended to but never initialized here — it
    is presumably a module-level list; confirm it is reset between
    invocations.
    """
    args = parse_args()
    if args.version:
        from .__init__ import __version__, __release_date__
        print('elkme %s (release date %s)' % (__version__, __release_date__))
        print('(c) 2015-2017 46elks AB <hello@46elks.com>')
        print(small_elk)
        exit(0)
    conf, conf_status = config.init_config(args)
    # conf_status is (ok, message): collect failures, echo info messages.
    if not conf_status[0]:
        errors.append(conf_status[1])
    elif conf_status[1]:
        print(conf_status[1])
    message = parse_message(args)
    # A config message with no message to send means "configured, done".
    if conf_status[1] and not message:
        sys.exit(0)
    try:
        elks_conn = Elks(auth=(conf['username'], conf['password']), api_url=conf.get('api_url'))
    except KeyError:
        errors.append('API keys not properly set. Please refer to ' +
                      '`elkme --usage`, `elkme --help` or ' +
                      'https://46elks.github.io/elkme')
    if not message:
        print(USAGE, file=sys.stderr)
        exit(-1)
    for error in errors:
        print('[ERROR] {}'.format(error))
        exit(-1)
    options = []
    if args.flash:
        options.append('flashsms')
    try:
        send_sms(elks_conn, conf, message, length=args.length, options=options)
    except ElksException as e:
        print(e, file=sys.stderr)
Executed on run
13,111
def _build_graph(self) -> nx.DiGraph:
    """Build the state-transition digraph from the model.

    Adds one edge per (state, successor) pair, with states serialized via
    _transform_state_to_string; updates the state and transition counters
    as a side effect.

    :returns: the populated networkx DiGraph.
    """
    digraph = nx.DiGraph()
    for state in self.model.all_states():
        self._number_of_states += 1
        for next_state in self.model.available_state(state):
            self._number_of_transitions += 1
            digraph.add_edge(self._transform_state_to_string(state),
                             self._transform_state_to_string(next_state))
    return digraph
Private method to build the graph from the model .
13,112
def _transform_state_to_string(self, state: State) -> str:
    """Serialize *state* by concatenating the string form of its value
    for each gene, in the model's gene order."""
    gene_values = [str(state[gene]) for gene in self.model.genes]
    return ''.join(gene_values)
Private method which transforms a state to a string .
13,113
def as_dot(self) -> str:
    """Return the graph rendered as a DOT-format string."""
    pydot_graph = nx.drawing.nx_pydot.to_pydot(self._graph)
    return pydot_graph.to_string()
Return as a string the dot version of the graph .
13,114
def export_to_dot(self, filename: str = 'output') -> None:
    """Write the DOT rendering of the graph to '<filename>.dot'."""
    dot_path = filename + '.dot'
    with open(dot_path, 'w') as output:
        output.write(self.as_dot())
Export the graph to the dot file filename . dot .
13,115
def get_duration_measures(source_file_path, output_path=None, phonemic=False, semantic=False, quiet=False, similarity_file=None, threshold=None):
    """Parse input arguments and run the VFClust clustering algorithm.

    Builds an Args object from the keyword arguments, validates it,
    derives the response category and output file name from the
    phonemic/semantic selection, then runs VFClustEngine.

    :param source_file_path: path to the response file to analyze.
    :param output_path: directory for the output csv (no file written
        when falsy).
    :param phonemic: phonemic category (letter), or False.
    :param semantic: semantic category, or False.
    :returns: dict of the computed measures.
    """
    args = Args()
    args.output_path = output_path
    args.phonemic = phonemic
    args.semantic = semantic
    args.source_file_path = source_file_path
    args.quiet = quiet
    args.similarity_file = similarity_file
    args.threshold = threshold
    args = validate_arguments(args)
    # The output prefix encodes the source file stem plus the category.
    if args.phonemic:
        response_category = args.phonemic
        output_prefix = os.path.basename(args.source_file_path).split('.')[0] + "_vfclust_phonemic_" + args.phonemic
    elif args.semantic:
        response_category = args.semantic
        output_prefix = os.path.basename(args.source_file_path).split('.')[0] + "_vfclust_semantic_" + args.semantic
    else:
        response_category = ""
        output_prefix = ""
    if args.output_path:
        target_file_path = os.path.join(args.output_path, output_prefix + '.csv')
    else:
        target_file_path = False
    engine = VFClustEngine(response_category=response_category,
                           response_file_path=args.source_file_path,
                           target_file_path=target_file_path,
                           quiet=args.quiet,
                           similarity_file=args.similarity_file,
                           threshold=args.threshold)
    return dict(engine.measures)
Parses input arguments and runs clustering algorithm .
13,116
def create_from_textgrid(self, word_list):
    """Fill the ParsedResponse with Units built from TextGrid.Word objects.

    Marks the response as carrying timing information.  For SEMANTIC
    responses the units are additionally lemmatized and tokenized.

    :param word_list: list of TextGrid.Word entries from a .TextGrid file.
    """
    self.timing_included = True
    for i, entry in enumerate(word_list):
        # index_in_timed_response lets later passes map a unit back to
        # its timed word.
        self.unit_list.append(Unit(entry, format="TextGrid", type=self.type, index_in_timed_response=i))
    if self.type == "SEMANTIC":
        self.lemmatize()
        self.tokenize()
Fills the ParsedResponse object with a list of TextGrid . Word objects originally from a . TextGrid file .
13,117
def lemmatize(self):
    """Lemmatize all Units in self.unit_list.

    A unit's text is replaced by its lemma only when the lemma itself is
    in self.lemmas, so out-of-vocabulary lemmas never overwrite the
    original token.
    """
    for unit in self.unit_list:
        # Lemmatize once instead of twice: the original called the
        # lemmatizer in both the test and the assignment.
        lemma = lemmatizer.lemmatize(unit.text)
        if lemma in self.lemmas:
            unit.text = lemma
Lemmatize all Units in self . unit_list .
13,118
def tokenize(self):
    """Merge adjacent Units that form known multiword (compound) names.

    Scans unit_list left to right, trying the longest compounds first
    (5 words down to 2); on a match, make_compound_word collapses the
    span into a single underscore-joined Unit.
    """
    if not self.quiet:
        print
        print "Finding compound words..."
    # Bucket the known names by word count so each window test is a
    # simple membership check.
    compound_word_dict = {}
    for compound_length in range(5, 1, -1):
        compound_word_dict[compound_length] = [name for name in self.names if len(name.split()) == compound_length]
    current_index = 0
    finished = False
    while not finished:
        for compound_length in range(5, 1, -1):
            if current_index + compound_length - 1 < len(self.unit_list):
                compound_word = ""
                for word in self.unit_list[current_index:current_index + compound_length]:
                    compound_word += " " + word.text
                compound_word = compound_word.strip()
                if compound_word in compound_word_dict[compound_length]:
                    self.make_compound_word(start_index=current_index, how_many=compound_length)
                    current_index += 1
                    break
        else:
            # for-else: no compound started here; advance one unit.
            current_index += 1
        if current_index >= len(self.unit_list):
            finished = True
Tokenizes all multiword names in the list of Units .
13,119
def make_compound_word(self, start_index, how_many):
    """Collapse how_many consecutive Units starting at start_index into a
    single compound-word Unit.

    The first Unit absorbs the following Units' texts (underscore-joined)
    into its text and original_text, and takes the last Unit's end_time;
    the absorbed Units are dropped from unit_list.
    """
    if not self.quiet:
        compound_word = ""
        for word in self.unit_list[start_index:start_index + how_many]:
            compound_word += " " + word.text
        # NOTE(review): the separator literal in this debug print was
        # garbled in this copy of the source; reconstructed as "-->".
        print compound_word.strip(), "-->", "_".join(compound_word.split())
    # Fold the trailing units into the first one.
    for other_unit in range(1, how_many):
        self.unit_list[start_index].original_text.append(self.unit_list[start_index + other_unit].text)
        self.unit_list[start_index].text += "_" + self.unit_list[start_index + other_unit].text
    self.unit_list[start_index].end_time = self.unit_list[start_index + how_many - 1].end_time
    # Drop the absorbed units.
    self.unit_list = self.unit_list[:start_index + 1] + self.unit_list[start_index + how_many:]
Combines two Units in self . unit_list to make a compound word token .
13,120
def remove_unit(self, index):
    """Remove the Unit at *index* from unit_list; other units are untouched."""
    if not self.quiet:
        print "Removing", self.unit_list[index].text
    self.unit_list.pop(index)
Removes the unit at the given index in self . unit_list . Does not modify any other units .
13,121
def combine_same_stem_units(self, index):
    """Merge the Unit at *index* with its successor (same word stem).

    The first Unit keeps its text, absorbs the second's text into
    original_text, and extends its end_time; the second Unit is removed.
    """
    if not self.quiet:
        combined_word = ""
        for word in self.unit_list[index:index + 2]:
            for original_word in word.original_text:
                combined_word += " " + original_word
        # NOTE(review): the separator literal in this debug print was
        # garbled in this copy of the source; reconstructed as "-->".
        print combined_word.strip(), "-->", "/".join(combined_word.split())
    self.unit_list[index].original_text.append(self.unit_list[index + 1].text)
    self.unit_list[index].end_time = self.unit_list[index + 1].end_time
    self.unit_list.pop(index + 1)
Combines adjacent words with the same stem into a single unit .
13,122
def display(self):
    """Pretty-print the ParsedResponse as a table, one row per unit."""
    header = ("Text", "Orig. Text", "Start time", "End time", "Phonetic")
    rows = [header]
    for unit in self.unit_list:
        rows.append((
            unit.text,
            "/".join(unit.original_text),
            unit.start_time,
            unit.end_time,
            unit.phonetic_representation,
        ))
    print_table(rows)
Pretty - prints the ParsedResponse to the screen .
13,123
def generate_phonetic_representation(self, word):
    """Generate a phonetic representation for *word* with the bundled
    t2p transcriber and the CMUdict tree.

    Writes the word to a temporary file, shells out to t2p, and returns
    the transcription tokens.

    NOTE(review): output[1:] assumes the first output token echoes the
    input word — confirm against t2p's actual output format.
    """
    with NamedTemporaryFile() as temp_file:
        temp_file.write(word)
        t2pargs = [os.path.abspath(os.path.join(os.path.dirname(__file__), 't2p/t2p')),
                   '-transcribe',
                   os.path.join(data_path, 'cmudict.0.7a.tree'),
                   temp_file.name]
        # Flush position so the subprocess sees the written contents.
        temp_file.seek(0)
        output, error = subprocess.Popen(t2pargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        output = output.split()
        phonetic_representation = output[1:]
        return phonetic_representation
Returns a generated phonetic representation for a word .
13,124
def modify_phonetic_representation(self, phonetic_representation):
    """Collapse a CMUdict-style phone list into a compact string with one
    character per phone.

    Stress digits are stripped from each phone, then each two-letter
    ARPABET phone is mapped to a single lowercase letter; the list is
    modified in place and the joined string is returned.
    """
    multis = ['AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'CH', 'DH', 'EH', 'ER',
              'EY', 'HH', 'IH', 'IY', 'JH', 'NG', 'OW', 'OY', 'SH', 'TH',
              'UH', 'UW', 'ZH']
    singles = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
               'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w']
    single_for = dict(zip(multis, singles))
    # Mutate in place (matching the original's side effect on the caller's
    # list), substituting single-letter codes where one exists.
    for i, phone in enumerate(phonetic_representation):
        stripped = re.sub(r'\d+', '', phone)
        phonetic_representation[i] = single_for.get(stripped, stripped)
    return ''.join(phonetic_representation)
Returns a compact phonetic representation given a CMUdict - formatted representation .
13,125
def clean(self):
    """Remove Units not applicable to the current category and normalize
    the rest.

    Three passes:
      1. drop words failing the phonetic/semantic admissibility test,
      2. merge adjacent words sharing a stem,
      3. (PHONETIC only) attach a phonetic representation to each unit,
         from cmudict when available, otherwise generated.
    Assumes self.type is "PHONETIC" or "SEMANTIC".
    """
    if not self.quiet:
        print
        print "Preprocessing input..."
        print "Raw response:"
        print
        self.display()
    if not self.quiet:
        print
        print "Cleaning words..."
    # Pass 1: remove inadmissible words.
    current_index = 0
    while current_index < len(self.unit_list):
        word = self.unit_list[current_index].text
        if self.type == "PHONETIC":
            # Must start with the target letter, be complete (no trailing
            # '-'), not a compound ('_'), and be an English word.
            test = (word.startswith(self.letter_or_category)
                    and not word.endswith('-')
                    and '_' not in word
                    and word.lower() in self.english_words)
        elif self.type == "SEMANTIC":
            test = word in self.permissible_words
        if not test:
            self.remove_unit(index=current_index)
        else:
            current_index += 1
    # Pass 2: merge adjacent same-stem words.
    current_index = 0
    finished = False
    while current_index < len(self.unit_list) - 1:
        if stemmer.stem(self.unit_list[current_index].text) == stemmer.stem(self.unit_list[current_index + 1].text):
            self.combine_same_stem_units(index=current_index)
        else:
            current_index += 1
    # Pass 3 (PHONETIC): attach phonetic representations.
    if self.type == "PHONETIC":
        for unit in self.unit_list:
            word = unit.text
            if word in self.cmudict:
                phonetic_representation = self.cmudict[word]
            if word not in self.cmudict:
                phonetic_representation = self.generate_phonetic_representation(word)
            phonetic_representation = self.modify_phonetic_representation(phonetic_representation)
            unit.phonetic_representation = phonetic_representation
    if not self.quiet:
        print
        print "Cleaned response:"
        print
        self.display()
Removes any Units that are not applicable given the current semantic or phonetic category .
13,126
def load_lsa_information(self):
    """Load the dict mapping permissible words to their LSA term vectors.

    Reads the pickled term-vector dictionary for the current category and
    clustering dimensionality from data_path into self.term_vectors.

    :raises Exception: if clustering_parameter is outside 50..100.
    """
    if not (49 < int(self.clustering_parameter) < 101):
        raise Exception('Only LSA dimensionalities in the range 50-100' +
                        ' are supported.')
    if not self.quiet:
        print "Loading LSA term vectors..."
    with open(os.path.join(data_path, self.category + '_' +
                           os.path.join('term_vector_dictionaries',
                                        'term_vectors_dict' + str(self.clustering_parameter) + '_cpickle.dat')), 'rb') as infile:
        self.term_vectors = pickle.load(infile)
Loads a dictionary from disk that maps permissible words to their LSA term vectors .
13,127
def get_similarity_measures(self):
    """Announce and compute the current similarity measure's scores."""
    if not self.quiet:
        print
        print "Computing", self.current_similarity_measure, "similarity..."
    self.compute_similarity_scores()
Helper function for computing similarity measures .
13,128
def get_raw_counts(self):
    """Tally word counts from the raw text response.

    Classifies each unit as a permissible word, exact/stem repetition,
    examiner word, word fragment, filled pause, or aside, incrementing
    the corresponding COUNT_ measures, and derives
    COUNT_unique_permissible_words at the end.
    """
    words = []
    labels = []
    words_said = set()
    for unit in self.parsed_response:
        word = unit.text
        test = False
        if self.type == "PHONETIC":
            # Admissible: starts with the target letter, carries no
            # annotation markers, is complete, and is an English word.
            test = (word.startswith(self.letter)
                    and "T_" not in word
                    and "E_" not in word
                    and "!" not in word
                    and "FILLEDPAUSE_" not in word
                    and not word.endswith('-')
                    and word.lower() in self.english_words)
        elif self.type == "SEMANTIC":
            test = (word in self.permissible_words)
        if test:
            self.measures['COUNT_total_words'] += 1
            self.measures['COUNT_permissible_words'] += 1
            if any(word == w for w in words_said):
                self.measures['COUNT_exact_repetitions'] += 1
                labels.append('EXACT REPETITION')
            elif any(stemmer.stem(word) == stemmer.stem(w) for w in words_said):
                self.measures['COUNT_stem_repetitions'] += 1
                labels.append('STEM REPETITION')
            else:
                labels.append('PERMISSIBLE WORD')
            words_said.add(word)
            words.append(word)
        elif word.lower().startswith('e_'):
            self.measures['COUNT_examiner_words'] += 1
            words.append(word)
            labels.append('EXAMINER WORD')
        elif word.endswith('-'):
            self.measures['COUNT_word_fragments'] += 1
            words.append(word)
            labels.append('WORD FRAGMENT')
        elif word.lower().startswith('filledpause'):
            self.measures['COUNT_filled_pauses'] += 1
            words.append(word)
            labels.append('FILLED PAUSE')
        elif word.lower() not in ['!sil', 't_noise', 't_cough', 't_lipsmack', 't_breath']:
            # Anything else that is not a silence/noise marker is an aside.
            self.measures['COUNT_total_words'] += 1
            self.measures['COUNT_asides'] += 1
            words.append(word)
            labels.append('ASIDE')
    if not self.quiet:
        print
        print "Labels:"
        print_table([(word, label) for word, label in zip(words, labels)])
    # Unique permissible words = permissible minus both repetition kinds.
    self.measures['COUNT_unique_permissible_words'] = self.measures['COUNT_permissible_words'] - self.measures['COUNT_exact_repetitions'] - self.measures['COUNT_stem_repetitions']
    if not self.quiet:
        print
        print "Counts:"
    collection_measures = [x for x in self.measures if x.startswith("COUNT_")]
    collection_measures.sort()
    if not self.quiet:
        print_table([(k, str(self.measures[k])) for k in collection_measures])
Determines counts for unique words , repetitions , etc . using the raw text response .
13,129
def compute_similarity_score(self, unit1, unit2):
    """Return the similarity score between two Units under the current
    measure, or None when no measure applies.

    PHONETIC: "phone" gives a normalized edit-distance similarity over
    the phonetic representations; "biphone" gives 1 when the words share
    an initial or final biphone, else 0.
    SEMANTIC: "lsa" gives cosine similarity of LSA term vectors; "custom"
    looks the pair up in the user-supplied score table.
    """
    if self.type == "PHONETIC":
        word1 = unit1.phonetic_representation
        word2 = unit2.phonetic_representation
        if self.current_similarity_measure == "phone":
            word1_length, word2_length = len(word1), len(word2)
            # Ensure word1 is the shorter: the DP row length is word1's.
            if word1_length > word2_length:
                word1, word2 = word2, word1
                word1_length, word2_length = word2_length, word1_length
            # Levenshtein distance with two rolling rows.
            # NOTE(review): Python 2 code — range() must yield a mutable
            # list here, and the final quotient relies on the module's
            # division semantics (confirm `from __future__ import division`).
            current = range(word1_length + 1)
            for i in range(1, word2_length + 1):
                previous, current = current, [i] + [0] * word1_length
                for j in range(1, word1_length + 1):
                    add, delete = previous[j] + 1, current[j - 1] + 1
                    change = previous[j - 1]
                    if word1[j - 1] != word2[i - 1]:
                        change += 1
                    current[j] = min(add, delete, change)
            phonetic_similarity_score = 1 - current[word1_length] / word2_length
            return phonetic_similarity_score
        elif self.current_similarity_measure == "biphone":
            # A shared initial or final biphone counts as similar.
            if word1[:2] == word2[:2] or word1[-2:] == word2[-2:]:
                common_biphone_score = 1
            else:
                common_biphone_score = 0
            return common_biphone_score
    elif self.type == "SEMANTIC":
        word1 = unit1.text
        word2 = unit2.text
        if self.current_similarity_measure == "lsa":
            # Cosine similarity between the two LSA term vectors.
            w1_vec = self.term_vectors[word1]
            w2_vec = self.term_vectors[word2]
            dot = sum([w1 * w2 for w1, w2 in zip(w1_vec, w2_vec)])
            norm1 = sqrt(sum([w * w for w in w1_vec]))
            norm2 = sqrt(sum([w * w for w in w2_vec]))
            semantic_relatedness_score = dot / (norm1 * norm2)
            return semantic_relatedness_score
        elif self.current_similarity_measure == "custom":
            # Look the pair up in either order; fall back to
            # same_word_similarity for identical words, else 0.
            try:
                similarity = self.custom_similarity_scores[(word1, word2)]
            except KeyError:
                try:
                    similarity = self.custom_similarity_scores[(word2, word1)]
                except KeyError:
                    if word1 == word2:
                        return self.same_word_similarity
                    else:
                        return 0
            return similarity
    return None
Returns the similarity score between two words .
13,130
def compute_similarity_scores(self):
    """Append the similarity score of each adjacent pair of units in the
    response to self.similarity_scores."""
    for i, unit in enumerate(self.parsed_response):
        if i < len(self.parsed_response) - 1:
            next_unit = self.parsed_response[i + 1]
            self.similarity_scores.append(self.compute_similarity_score(unit, next_unit))
    if not self.quiet:
        print self.current_similarity_measure, "similarity scores (adjacent) -- higher is closer:"
        table = [("Word 1", "Word 2", "Score")] + \
                [(self.parsed_response[i].text,
                  self.parsed_response[i + 1].text,
                  "{0:.3f}".format(round(self.similarity_scores[i], 2)))
                 for i in range(len(self.parsed_response) - 1)]
        print_table(table)
Produce a list of similarity scores for each contiguous pair in a response .
13,131
def compute_pairwise_similarity_score(self):
    """Compute the mean pairwise similarity over all unordered pairs of
    Units in the response.

    Scores equal to same_word_similarity are excluded from the mean.
    Stores the result (or 'NA' when there are no pairs) under
    COLLECTION_<measure>_pairwise_similarity_score_mean.
    """
    # Track visited pairs in a set: the original used list membership,
    # making the double loop O(n^4) overall; behavior is unchanged.
    pairs = set()
    all_scores = []
    for i, unit in enumerate(self.parsed_response):
        for j, other_unit in enumerate(self.parsed_response):
            if i != j:
                pair = (i, j)
                rev_pair = (j, i)
                if pair not in pairs and rev_pair not in pairs:
                    score = self.compute_similarity_score(unit, other_unit)
                    pairs.add(pair)
                    pairs.add(rev_pair)
                    all_scores.append(score)
    # Same-word scores would bias the mean; drop them.
    all_scores = [s for s in all_scores if s != self.same_word_similarity]
    self.measures["COLLECTION_" + self.current_similarity_measure + "_pairwise_similarity_score_mean"] = get_mean(all_scores) if len(pairs) > 0 else 'NA'
Computes the average pairwise similarity score between all pairs of Units .
13,132
def compute_collection_measures(self, no_singletons=False):
    """Summarize the discovered collections: count, mean size, max size,
    and switch count.

    With no_singletons=True, size-1 collections are ignored and the
    measure keys gain a 'no_singletons_' suffix.
    """
    prefix = "COLLECTION_" + self.current_similarity_measure + "_" + self.current_collection_type + "_"
    if no_singletons:
        prefix += "no_singletons_"
        sizes = [size for size in self.collection_sizes if size != 1]
    else:
        sizes = self.collection_sizes
    collection_count = len(sizes)
    self.measures[prefix + 'count'] = collection_count
    self.measures[prefix + 'size_mean'] = get_mean(sizes) if collection_count > 0 else 0
    self.measures[prefix + 'size_max'] = max(sizes) if len(sizes) > 0 else 0
    # One switch between each pair of consecutive collections.
    self.measures[prefix + 'switch_count'] = collection_count - 1
Computes summaries of measures using the discovered collections .
13,133
def compute_duration_measures(self):
    """Compute all timing-derived measures for the current measure and
    collection type; only applicable to TextGrid responses."""
    prefix = "TIMING_" + self.current_similarity_measure + "_" + self.current_collection_type + "_"
    if self.response_format != 'TextGrid':
        return
    # Whole-response phone durations (collection-independent prefix).
    self.compute_response_vowel_duration("TIMING_")
    self.compute_response_continuant_duration("TIMING_")
    # Collection-relative durations.
    self.compute_between_collection_interval_duration(prefix)
    self.compute_within_collection_interval_duration(prefix)
    for exclude_singletons in (True, False):
        self.compute_within_collection_vowel_duration(prefix, no_singletons=exclude_singletons)
        self.compute_within_collection_continuant_duration(prefix, no_singletons=exclude_singletons)
Helper function for computing measures derived from timing information .
13,134
def compute_response_vowel_duration(self, prefix):
    """Compute the mean vowel duration over the entire timed response.

    Stores the mean under <prefix>response_vowel_duration_mean, or 'NA'
    when the response contains no vowels.
    """
    durations = []
    for word in self.full_timed_response:
        if word.phones:
            for phone in word.phones:
                if phone.string in self.vowels:
                    durations.append(phone.end - phone.start)
    self.measures[prefix + 'response_vowel_duration_mean'] = get_mean(durations) if len(durations) > 0 else 'NA'
    if not self.quiet:
        print "Mean response vowel duration:", self.measures[prefix + 'response_vowel_duration_mean']
Computes mean vowel duration in entire response .
13,135
def compute_between_collection_interval_duration(self, prefix):
    """Compute the mean interval between consecutive collections.

    Each collection is reduced to a (start, end) span; the gap between
    each span and the next is clamped at zero so overlapping collections
    count as no gap. Stores 'NA' with fewer than two collections.
    """
    durations = []
    for collection in self.collection_list:
        start = collection[0].start_time
        end = collection[-1].end_time
        durations.append((start, end))
    # Gap between the end of one collection and the start of the next.
    interstices = [durations[i + 1][0] - durations[i][1] for i, d in enumerate(durations[:-1])]
    # Negative gaps mean overlapping collections; treat them as zero.
    for i, entry in enumerate(interstices):
        if interstices[i] < 0:
            interstices[i] = 0
    self.measures[prefix + 'between_collection_interval_duration_mean'] = get_mean(interstices) if len(interstices) > 0 else 'NA'
    if not self.quiet:
        print
        print self.current_similarity_measure + " between-" + self.current_collection_type + " durations"
        table = [(self.current_collection_type + " 1 (start,end)", "Interval", self.current_collection_type + " 2 (start,end)")] + \
                [(str(d1), str(i1), str(d2)) for d1, i1, d2 in zip(durations[:-1], interstices, durations[1:])]
        print_table(table)
        print
        print "Mean " + self.current_similarity_measure + " between-" + self.current_collection_type + " duration", self.measures[prefix + 'between_collection_interval_duration_mean']
Calculates BETWEEN - collection intervals for the current collection and measure type and takes their mean .
13,136
def compute_within_collection_interval_duration(self, prefix):
    """Compute the mean between-word gap WITHIN collections.

    Singleton collections contribute no gaps. Stores 'NA' when there are
    no gaps at all.
    """
    interstices = []
    for cluster in self.collection_list:
        if len(cluster) > 1:
            for i in range(len(cluster)):
                if i != len(cluster) - 1:
                    interstice = cluster[i + 1].start_time - cluster[i].end_time
                    interstices.append(interstice)
    self.measures[prefix + 'within_collection_interval_duration_mean'] = get_mean(interstices) if len(interstices) > 0 else 'NA'
    if not self.quiet:
        print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + " between-word duration:", self.measures[prefix + 'within_collection_interval_duration_mean']
Calculates mean between - word duration WITHIN collections .
13,137
def compute_within_collection_vowel_duration(self, prefix, no_singletons=False):
    """Compute the mean vowel duration of words inside collections.

    With no_singletons=True only collections of two or more words are
    considered; with no_singletons=False singletons are included and the
    measure key gains a 'no_singletons_' suffix.

    NOTE(review): the prefix suffixing looks inverted — the
    'no_singletons_' label is attached on the *including*-singletons
    branch, the opposite of compute_collection_measures. Confirm against
    the published measure definitions before changing, since downstream
    consumers may depend on the current keys.
    """
    if no_singletons:
        min_size = 2
    else:
        prefix += "no_singletons_"
        min_size = 1
    durations = []
    for cluster in self.collection_list:
        if len(cluster) >= min_size:
            for word in cluster:
                # Map back to the timed word to read phone-level timing.
                word = self.full_timed_response[word.index_in_timed_response]
                for phone in word.phones:
                    if phone.string in self.vowels:
                        durations.append(phone.end - phone.start)
    self.measures[prefix + 'within_collection_vowel_duration_mean'] = get_mean(durations) if len(durations) > 0 else 'NA'
    if not self.quiet:
        if no_singletons:
            print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + " vowel duration, excluding singletons:", self.measures[prefix + 'within_collection_vowel_duration_mean']
        else:
            print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + " vowel duration, including singletons:", self.measures[prefix + 'within_collection_vowel_duration_mean']
Computes the mean duration of vowels from Units within clusters .
13,138
def print_output ( self ) : if self . response_format == "csv" : for key in self . measures : if "TIMING_" in key : self . measures [ key ] = "NA" if not self . quiet : print print self . type . upper ( ) + " RESULTS:" keys = [ e for e in self . measures if 'COUNT_' in e ] keys . sort ( ) print "Counts:" print_table ( [ ( entry , str ( self . measures [ entry ] ) ) for entry in keys ] ) keys = [ e for e in self . measures if 'COLLECTION_' in e ] keys . sort ( ) print print "Collection measures:" print_table ( [ ( entry , str ( self . measures [ entry ] ) ) for entry in keys ] ) if self . response_format == "TextGrid" : keys = [ e for e in self . measures if 'TIMING_' in e ] keys . sort ( ) print print "Time-based measures:" print_table ( [ ( entry , str ( self . measures [ entry ] ) ) for entry in keys ] ) if self . target_file : with open ( self . target_file , 'w' ) as outfile : header = [ 'file_id' ] + [ self . type + "_" + e for e in self . measures if 'COUNT_' in e ] + [ self . type + "_" + e for e in self . measures if 'COLLECTION_' in e ] + [ self . type + "_" + e for e in self . measures if 'TIMING_' in e ] writer = csv . writer ( outfile , quoting = csv . QUOTE_MINIMAL ) writer . writerow ( header ) writer . writerow ( [ self . measures [ "file_id" ] ] + [ self . measures [ "_" . join ( e . split ( '_' ) [ 1 : ] ) ] for e in header [ 1 : ] ] )
Outputs the final list of measures to the screen and to a csv file.
13,139
def insert_child ( self , child_pid , index = - 1 ) : if child_pid . status != PIDStatus . REGISTERED : raise PIDRelationConsistencyError ( "Version PIDs should have status 'REGISTERED'. Use " "insert_draft_child to insert 'RESERVED' draft PID." ) with db . session . begin_nested ( ) : draft = self . draft_child if draft and index == - 1 : index = self . index ( draft ) super ( PIDNodeVersioning , self ) . insert_child ( child_pid , index = index ) self . update_redirect ( )
Insert a Version child PID .
13,140
def remove_child ( self , child_pid ) : if child_pid . status == PIDStatus . RESERVED : raise PIDRelationConsistencyError ( "Version PIDs should not have status 'RESERVED'. Use " "remove_draft_child to remove a draft PID." ) with db . session . begin_nested ( ) : super ( PIDNodeVersioning , self ) . remove_child ( child_pid , reorder = True ) self . update_redirect ( )
Remove a Version child PID .
13,141
def insert_draft_child ( self , child_pid ) : if child_pid . status != PIDStatus . RESERVED : raise PIDRelationConsistencyError ( "Draft child should have status 'RESERVED'" ) if not self . draft_child : with db . session . begin_nested ( ) : super ( PIDNodeVersioning , self ) . insert_child ( child_pid , index = - 1 ) else : raise PIDRelationConsistencyError ( "Draft child already exists for this relation: {0}" . format ( self . draft_child ) )
Insert a draft child to versioning .
13,142
def remove_draft_child ( self ) : if self . draft_child : with db . session . begin_nested ( ) : super ( PIDNodeVersioning , self ) . remove_child ( self . draft_child , reorder = True )
Remove the draft child from versioning .
13,143
def update_redirect ( self ) : if self . last_child : self . _resolved_pid . redirect ( self . last_child ) elif any ( map ( lambda pid : pid . status not in [ PIDStatus . DELETED , PIDStatus . REGISTERED , PIDStatus . RESERVED ] , super ( PIDNodeVersioning , self ) . children . all ( ) ) ) : raise PIDRelationConsistencyError ( "Invalid relation state. Only REGISTERED, RESERVED " "and DELETED PIDs are supported." )
Update the parent redirect to the current last child .
13,144
def feedback ( request ) :
    """Send feedback to the authors of the system.

    GET renders the feedback page; POST stores a Comment and, when
    configured, mails a notification to the configured recipients.
    """
    if request . method == 'GET' :
        return render ( request , 'feedback_feedback.html' , { } , help_text = feedback . __doc__ )
    if request . method == 'POST' :
        feedback_data = json_body ( request . body . decode ( "utf-8" ) )
        feedback_data [ 'user_agent' ] = Session . objects . get_current_session ( ) . http_user_agent . content
        # Fall back to the authenticated user's identity when absent.
        if not feedback_data . get ( 'username' ) :
            feedback_data [ 'username' ] = request . user . username
        if not feedback_data . get ( 'email' ) :
            feedback_data [ 'email' ] = request . user . email
        comment = Comment . objects . create (
            username = feedback_data [ 'username' ] ,
            email = feedback_data [ 'email' ] ,
            text = feedback_data [ 'text' ] )
        if get_config ( 'proso_feedback' , 'send_emails' , default = True ) :
            feedback_domain = get_config ( 'proso_feedback' , 'domain' , required = True )
            feedback_to = get_config ( 'proso_feedback' , 'to' , required = True )
            # Route likely-spam submissions through a separate sender address.
            if is_likely_worthless ( feedback_data ) :
                mail_from = 'spam@' + feedback_domain
            else :
                mail_from = 'feedback@' + feedback_domain
            text_content = render_to_string ( "emails/feedback.plain.txt" , {
                "feedback" : feedback_data ,
                "user" : request . user ,
            } )
            html_content = render_to_string ( "emails/feedback.html" , {
                "feedback" : feedback_data ,
                "user" : request . user ,
            } )
            subject = feedback_domain + ' feedback ' + str ( comment . id )
            mail = EmailMultiAlternatives (
                subject ,
                text_content ,
                mail_from ,
                feedback_to ,
            )
            mail . attach_alternative ( html_content , "text/html" )
            mail . send ( )
            LOGGER . debug ( "email sent %s\n" , text_content )
        return HttpResponse ( 'ok' , status = 201 )
    else :
        # BUG FIX: the message used a %-style placeholder with str.format,
        # so the method name was never substituted into the response body.
        return HttpResponseBadRequest ( "method {0} is not allowed" . format ( request . method ) )
Send feedback to the authors of the system .
13,145
def rating ( request ) :
    """Rate the current practice.

    GET renders the rating page; POST validates and saves a Rating with an
    integer value in 1..8, returning HTTP 201 on success.
    """
    if request . method == 'GET' :
        return render ( request , 'feedback_rating.html' , { } , help_text = rating . __doc__ )
    if request . method == 'POST' :
        data = json_body ( request . body . decode ( "utf-8" ) )
        # Valid ratings are the integers 1..8.
        if data [ 'value' ] not in list ( range ( 1 , 9 ) ) :
            return render_json (
                request ,
                { 'error' : _ ( 'The given value is not valid.' ) , 'error_type' : 'invalid_value' } ,
                template = 'feedback_json.html' ,
                status = 400 )
        rating_object = Rating (
            user = request . user ,
            value = data [ 'value' ] ,
        )
        rating_object . save ( )
        return HttpResponse ( 'ok' , status = 201 )
    else :
        # BUG FIX: the message used a %-style placeholder with str.format,
        # so the method name was never substituted into the response body.
        return HttpResponseBadRequest ( "method {0} is not allowed" . format ( request . method ) )
Rate the current practice .
13,146
def colour ( colour , message , bold = False ) :
    """Colour *message* with the given foreground colour (optionally bold)."""
    style_kwargs = { 'fg' : colour , 'text' : message , 'bold' : bold }
    return style ( ** style_kwargs )
Color a message
13,147
def customFilter ( self , filterFunc ) :
    """customFilter - Apply a custom filter function to each element.

    Returns a new collection (same class as self) containing only the
    items for which ``filterFunc(item)`` is truthy.
    """
    matches = self . __class__ ( )
    matches . extend ( item for item in self if filterFunc ( item ) )
    return matches
customFilter - Apply a custom filter to elements and return a QueryableList of matches
13,148
def sort_by ( self , fieldName , reverse = False ) :
    """sort_by - Return a copy of this collection sorted by *fieldName*.

    :param fieldName: field whose value (via self._get_item_value) is the sort key.
    :param reverse: sort descending when True.
    """
    def sort_key ( item ) :
        return self . _get_item_value ( item , fieldName )
    ordered = sorted ( self , key = sort_key , reverse = reverse )
    return self . __class__ ( ordered )
sort_by - Return a copy of this collection sorted by the given fieldName .
13,149
def uri ( self ) :
    """Return the sqlalchemy connect-string URI built from this credential.

    ``has_port`` / ``has_password`` supply the ``:`` separators only when
    the corresponding field is present.
    """
    port = "" if self . port is None else self . port
    password = "" if self . password is None else self . password
    fields = dict (
        host = self . host ,
        port = port ,
        database = self . database ,
        username = self . username ,
        password = password ,
        has_password = "" if self . password is None else ":" ,
        has_port = "" if self . port is None else ":" ,
    )
    return self . uri_template . format ( ** fields )
Return sqlalchemy connect string URI .
13,150
def from_json ( cls , json_file , json_path = None , key_mapping = None ) : cls . _validate_key_mapping ( key_mapping ) with open ( json_file , "rb" ) as f : data = json . loads ( f . read ( ) . decode ( "utf-8" ) ) return cls . _from_json_data ( data , json_path , key_mapping )
Load connection credential from json file .
13,151
def from_s3_json ( cls , bucket_name , key , json_path = None , key_mapping = None , aws_profile = None , aws_access_key_id = None , aws_secret_access_key = None , region_name = None ) : import boto3 ses = boto3 . Session ( aws_access_key_id = aws_access_key_id , aws_secret_access_key = aws_secret_access_key , region_name = region_name , profile_name = aws_profile , ) s3 = ses . resource ( "s3" ) bucket = s3 . Bucket ( bucket_name ) object = bucket . Object ( key ) data = json . loads ( object . get ( ) [ "Body" ] . read ( ) . decode ( "utf-8" ) ) return cls . _from_json_data ( data , json_path , key_mapping )
Load database credential from json on s3 .
13,152
def from_env ( cls , prefix , kms_decrypt = False , aws_profile = None ) :
    """Load database credentials from environment variables.

    Reads ``<PREFIX>_HOST/PORT/DATABASE/USERNAME/PASSWORD``.

    :param prefix: uppercase env-var prefix, e.g. ``"MYDB"`` (trailing ``_`` optional).
    :param kms_decrypt: when True, treat each value as base64 KMS ciphertext
        and decrypt it via AWS KMS.
    :param aws_profile: AWS profile name used to build the KMS client.
    :raises ValueError: if the prefix is empty or contains characters other
        than ``[A-Z]`` and ``_``.
    """
    if len ( prefix ) < 1 :
        raise ValueError ( "prefix can't be empty" )
    if len ( set ( prefix ) . difference ( set ( string . ascii_uppercase + "_" ) ) ) :
        raise ValueError ( "prefix can only use [A-Z] and '_'!" )
    if not prefix . endswith ( "_" ) :
        prefix = prefix + "_"
    data = dict (
        host = os . getenv ( prefix + "HOST" ) ,
        port = os . getenv ( prefix + "PORT" ) ,
        database = os . getenv ( prefix + "DATABASE" ) ,
        username = os . getenv ( prefix + "USERNAME" ) ,
        password = os . getenv ( prefix + "PASSWORD" ) ,
    )
    if kms_decrypt is True :
        import boto3
        from base64 import b64decode
        # BUG FIX: the original branched the wrong way round -- it built the
        # profile-based Session when aws_profile was None and the default
        # client when a profile *was* supplied.
        if aws_profile is not None :
            ses = boto3 . Session ( profile_name = aws_profile )
            kms = ses . client ( "kms" )
        else :
            kms = boto3 . client ( "kms" )

        def decrypt ( kms , text ) :
            return kms . decrypt (
                CiphertextBlob = b64decode ( text . encode ( "utf-8" ) )
            ) [ "Plaintext" ] . decode ( "utf-8" )

        data = {
            key : value if value is None else decrypt ( kms , str ( value ) )
            for key , value in data . items ( )
        }
    return cls ( ** data )
Load database credential from env variable .
13,153
def to_dict ( self ) :
    """Return the credential fields as a plain dict."""
    keys = ( "host" , "port" , "database" , "username" , "password" )
    return { key : getattr ( self , key ) for key in keys }
Convert credentials into a dict .
13,154
def start_debugging ( ) : exc_type , exc_value , exc_tb = sys . exc_info ( ) if hasattr ( exc_value , '_ipdbugger_let_raise' ) : raise_ ( * sys . exc_info ( ) ) print ( ) for line in traceback . format_exception ( exc_type , exc_value , exc_tb ) : print ( colored ( line , 'red' ) , end = ' ' ) test_frame = sys . _getframe ( - 1 ) . f_back from ipdb . __main__ import wrap_sys_excepthook wrap_sys_excepthook ( ) IPDBugger ( exc_info = sys . exc_info ( ) ) . set_trace ( test_frame )
Start a debugging session after catching an exception .
13,155
def get_last_lineno ( node ) :
    """Recursively find the greatest line number reachable from *node*.

    Returns 0 for nodes (and subtrees) that carry no ``lineno`` attribute.
    """
    max_lineno = getattr ( node , "lineno" , 0 )
    for _ , field in ast . iter_fields ( node ) :
        # Treat a scalar field like a one-element list so both cases share
        # the same recursion path; non-AST values are simply skipped.
        values = field if isinstance ( field , list ) else [ field ]
        for value in values :
            if isinstance ( value , ast . AST ) :
                max_lineno = max ( max_lineno , get_last_lineno ( value ) )
    return max_lineno
Recursively find the last line number of the ast node .
13,156
def do_raise ( self , arg ) : self . do_continue ( arg ) _ , exc_value , _ = self . exc_info exc_value . _ipdbugger_let_raise = True raise_ ( * self . exc_info )
Raise the last exception caught .
13,157
def do_retry ( self , arg ) : prev_line = self . curframe . f_lineno - 1 while True : try : self . curframe . f_lineno = prev_line break except ValueError : prev_line -= 1 self . do_jump ( prev_line ) self . do_continue ( arg ) return 1
Rerun the previous command .
13,158
def dispatch_line ( self , frame ) : callback = TerminalPdb . dispatch_line ( self , frame ) if self . stoplineno == - 1 : return None return callback
Handle line action and return the next line callback .
13,159
def wrap_with_try ( self , node ) : handlers = [ ] if self . ignore_exceptions is None : handlers . append ( ast . ExceptHandler ( type = None , name = None , body = [ ast . Raise ( ) ] ) ) else : ignores_nodes = self . ignore_exceptions handlers . append ( ast . ExceptHandler ( type = ast . Tuple ( ignores_nodes , ast . Load ( ) ) , name = None , body = [ ast . Raise ( ) ] ) ) if self . catch_exception is None or get_node_value ( self . catch_exception ) not in ( get_node_value ( ast_node ) for ast_node in self . ignore_exceptions ) : call_extra_parameters = [ ] if IS_PYTHON_3 else [ None , None ] start_debug_cmd = ast . Expr ( value = ast . Call ( ast . Name ( "start_debugging" , ast . Load ( ) ) , [ ] , [ ] , * call_extra_parameters ) ) catch_exception_type = None if self . catch_exception is not None : catch_exception_type = self . catch_exception handlers . append ( ast . ExceptHandler ( type = catch_exception_type , name = None , body = [ start_debug_cmd ] ) ) try_except_extra_params = { "finalbody" : [ ] } if IS_PYTHON_3 else { } new_node = self . ast_try_except ( orelse = [ ] , body = [ node ] , handlers = handlers , ** try_except_extra_params ) return ast . copy_location ( new_node , node )
Wrap an ast node in a try node to enter debug on exception .
13,160
def try_except_handler ( self , node ) : excepted_types = [ ] for handler in node . handlers : if handler . type is None : excepted_types = None break if isinstance ( handler . type , ast . Tuple ) : excepted_types . extend ( [ exception_type for exception_type in handler . type . elts ] ) else : excepted_types . append ( handler . type ) new_exception_list = self . ignore_exceptions if self . ignore_exceptions is not None : if excepted_types is None : new_exception_list = None else : new_exception_list = list ( set ( excepted_types + self . ignore_exceptions ) ) old_exception_handlers , self . ignore_exceptions = self . ignore_exceptions , new_exception_list node . body = [ self . visit ( node_item ) for node_item in node . body ] self . ignore_exceptions = old_exception_handlers
Handler for try except statement to ignore excepted exceptions .
13,161
def visit_Call ( self , node ) : if self . depth == 0 : return node if self . ignore_exceptions is None : ignore_exceptions = ast . Name ( "None" , ast . Load ( ) ) else : ignore_exceptions = ast . List ( self . ignore_exceptions , ast . Load ( ) ) catch_exception_type = self . catch_exception if self . catch_exception else "None" catch_exception = ast . Name ( catch_exception_type , ast . Load ( ) ) depth = ast . Num ( self . depth - 1 if self . depth > 0 else - 1 ) debug_node_name = ast . Name ( "debug" , ast . Load ( ) ) call_extra_parameters = [ ] if IS_PYTHON_3 else [ None , None ] node . func = ast . Call ( debug_node_name , [ node . func , ignore_exceptions , catch_exception , depth ] , [ ] , * call_extra_parameters ) return node
Propagate debug wrapper into inner function calls if needed .
13,162
def get_qtls_from_rqtl_data ( matrix , lod_threshold ) : t_matrix = list ( zip ( * matrix ) ) qtls = [ [ 'Trait' , 'Linkage Group' , 'Position' , 'Exact marker' , 'LOD' ] ] for row in t_matrix [ 3 : ] : lgroup = None max_lod = None peak = None cnt = 1 while cnt < len ( row ) : if lgroup is None : lgroup = t_matrix [ 1 ] [ cnt ] if lgroup == t_matrix [ 1 ] [ cnt ] : if max_lod is None : max_lod = float ( row [ cnt ] ) if float ( row [ cnt ] ) > float ( max_lod ) : max_lod = float ( row [ cnt ] ) peak = cnt else : if max_lod and float ( max_lod ) > float ( lod_threshold ) and peak : qtl = [ row [ 0 ] , t_matrix [ 1 ] [ peak ] , t_matrix [ 2 ] [ peak ] , t_matrix [ 0 ] [ peak ] , max_lod , ] qtls . append ( qtl ) lgroup = None max_lod = None peak = cnt cnt = cnt + 1 return qtls
Retrieve the list of significants QTLs for the given input matrix and using the specified LOD threshold . This assumes one QTL per linkage group .
13,163
def save_session ( self , * args , ** kwargs ) : if g . get ( 'stateless_sessions' ) : return return super ( BoilerSessionInterface , self ) . save_session ( * args , ** kwargs )
Save session Skip setting session cookie if requested via g . stateless_sessions
13,164
def prune_old ( self ) : path = self . pubdir dirmask = self . dirmask expire = self . expire expire_limit = int ( time . time ( ) ) - ( 86400 * expire ) logger . info ( 'Pruning directories older than %d days' , expire ) if not os . path . isdir ( path ) : logger . warning ( 'Dir %r not found -- skipping pruning' , path ) return for entry in os . listdir ( path ) : logger . debug ( 'Found: %r' , entry ) if os . path . isdir ( os . path . join ( path , entry ) ) : try : stamp = time . mktime ( time . strptime ( entry , dirmask ) ) except ValueError as e : logger . info ( 'Dir %r did not match dirmask %r: %r' , entry , dirmask , e ) logger . info ( 'Skipping %r' , entry ) continue if stamp < expire_limit : shutil . rmtree ( os . path . join ( path , entry ) ) logger . info ( 'File Publisher: Pruned old dir: %r' , entry ) else : logger . info ( '%r is still active' , entry ) else : logger . info ( '%r is not a directory. Skipping.' , entry ) logger . info ( 'Finished with pruning' )
Removes the directories that are older than a certain date .
13,165
def send_report ( self , report_parts ) : logger . info ( 'Checking and creating the report directory' ) report_parts = sorted ( filter ( lambda x : x . fmt in self . formats , report_parts ) , key = lambda x : self . formats . index ( x . fmt ) ) workdir = os . path . join ( self . pubdir , self . dirname ) if not os . path . isdir ( workdir ) : try : os . makedirs ( workdir ) except OSError as e : logger . error ( 'Error creating directory "{0}": {0}' . format ( workdir , e ) ) return fmtname = '{0}-{1}-{2}.{3}' if len ( report_parts ) > 1 else '{0}-{2}.{3}' for i , text_part in enumerate ( filter ( lambda x : x . fmt in self . formats , report_parts ) ) : filename = fmtname . format ( self . filename , i , socket . gethostname ( ) , text_part . ext ) repfile = os . path . join ( workdir , filename ) logger . info ( 'Dumping the report part %d into %r' , i , repfile ) fh = open ( repfile , 'w' ) fh . write ( text_part . text ) fh . close ( ) print ( 'Report part saved in: %r' % repfile ) if self . notify : logger . info ( 'Creating an email message' ) email_address = self . config . get ( 'main' , 'email_address' ) smtp_server = self . config . get ( 'main' , 'smtp_server' ) publoc = os . path . join ( self . pubroot , self . dirname ) eml = MIMEText ( 'New lograptor report is available at:\r\n{0}' . format ( publoc ) ) eml [ 'Subject' ] = '{0} system events: {1} (report notification)' . format ( socket . gethostname ( ) , time . strftime ( '%c' , time . localtime ( ) ) ) eml [ 'Date' ] = formatdate ( ) eml [ 'From' ] = email_address eml [ 'To' ] = ', ' . join ( self . notify ) eml [ 'X-Mailer' ] = u'{0}-{1}' . format ( package_name , __version__ ) mail_message ( smtp_server , eml . as_string ( ) , email_address , self . notify ) print ( 'Notification mailed to: {0}' . format ( ',' . join ( self . notify ) ) ) if self . rawlogs : logfilename = '{0}.log' . format ( self . filename ) logfile = os . path . join ( workdir , '{0}.gz' . 
format ( logfilename ) ) logger . info ( 'Gzipping logs and writing them to %r' , logfilename ) outfh = open ( logfile , 'w+b' ) do_chunked_gzip ( self . rawfh , outfh , logfilename ) outfh . close ( ) print ( 'Gzipped logs saved in: {0}' . format ( logfile ) ) self . prune_old ( )
Publish the report parts to local files . Each report part is a text with a title and specific extension . For html and plaintext sending the report part is unique for csv send also the stats and unparsed string are plain text and report items are csv texts .
13,166
def parse ( self ) : self . _parse ( self . method ) return list ( set ( [ deco for deco in self . decos if deco ] ) )
Return the list of string of all the decorators found
13,167
def filter_304_headers ( headers ) :
    """Filter (name, value) header pairs down to those allowed in a
    304 Not Modified response (names compared case-insensitively)."""
    kept = [ ]
    for name , value in headers :
        if name . lower ( ) not in _filter_from_304 :
            kept . append ( ( name , value ) )
    return kept
Filter a list of headers to include in a 304 Not Modified response .
13,168
def response ( code , body = '' , etag = None , last_modified = None , expires = None , ** kw ) : if etag is not None : if not ( etag [ 0 ] == '"' and etag [ - 1 ] == '"' ) : etag = '"%s"' % etag kw [ 'etag' ] = etag if last_modified is not None : kw [ 'last_modified' ] = datetime_to_httpdate ( last_modified ) if expires is not None : if isinstance ( expires , datetime ) : kw [ 'expires' ] = datetime_to_httpdate ( expires ) else : kw [ 'expires' ] = timedelta_to_httpdate ( expires ) headers = [ ( k . replace ( '_' , '-' ) . title ( ) , v ) for k , v in sorted ( kw . items ( ) ) ] return Response ( code , headers , body )
Helper to build an HTTP response .
13,169
def body ( self ) : if self . _body is None : raw_body = self . _raw_body if self . _body_writer is None : self . _body = raw_body ( ) if callable ( raw_body ) else raw_body else : self . _body = self . _body_writer ( raw_body ) return self . _body
Seralizes and returns the response body .
13,170
def set_cookie ( self , key , value = '' , max_age = None , path = '/' , domain = None , secure = False , httponly = False , expires = None ) : key , value = key . encode ( 'utf-8' ) , value . encode ( 'utf-8' ) cookie = SimpleCookie ( { key : value } ) m = cookie [ key ] if max_age is not None : if isinstance ( max_age , timedelta ) : m [ 'max-age' ] = int ( total_seconds ( max_age ) ) else : m [ 'max-age' ] = int ( max_age ) if path is not None : m [ 'path' ] = path . encode ( 'utf-8' ) if domain is not None : m [ 'domain' ] = domain . encode ( 'utf-8' ) if secure : m [ 'secure' ] = True if httponly : m [ 'httponly' ] = True if expires is not None : if isinstance ( expires , datetime ) : expires = total_seconds ( expires - datetime . utcnow ( ) ) elif isinstance ( expires , timedelta ) : expires = total_seconds ( expires ) m [ 'expires' ] = int ( expires ) self . headers . add_header ( 'Set-Cookie' , m . OutputString ( ) )
Set a response cookie .
13,171
def conditional_to ( self , request ) :
    """Return a response that is conditional to a given request.

    For a 200 response, honours ``If-None-Match`` (ETag, weak compare) and
    ``If-Modified-Since``; returns a 304 Not Modified response when the
    client's cached copy is still valid, otherwise returns self unchanged.
    """
    # Only plain 200 responses can be downgraded to 304.
    if not self . code == 200 :
        return self
    request_headers = request . headers
    response_headers = self . headers
    if_none_match = request_headers . get ( 'If-None-Match' )
    if_modified_since = request_headers . get ( 'If-Modified-Since' )
    etag_ok , date_ok = False , False
    if if_none_match :
        etag = response_headers . get ( 'ETag' )
        if etag and match_etag ( etag , if_none_match , weak = True ) :
            etag_ok = True
    if if_modified_since :
        last_modified = response_headers . get ( 'Last-Modified' )
        if last_modified :
            try :
                modified_ts = httpdate_to_timestamp ( last_modified )
                last_valid_ts = httpdate_to_timestamp ( if_modified_since )
                if modified_ts <= last_valid_ts :
                    date_ok = True
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt. Unparsable dates are simply
            # treated as "not modified-since validated".
            except Exception :
                pass
    if if_none_match and not etag_ok :
        return self
    elif if_modified_since and not date_ok :
        return self
    elif etag_ok or date_ok :
        headers = filter_304_headers ( self . headers . items ( ) )
        if 'Date' not in self . headers :
            headers . append ( ( 'Date' , datetime_to_httpdate ( time . time ( ) ) ) )
        return Response ( status = 304 , headers = headers , body = '' )
    return self
Return a response that is conditional to a given request .
13,172
def _get_ukko_report ( ) : with urllib . request . urlopen ( URL_UKKO_REPORT ) as response : ret = str ( response . read ( ) ) return ret
Get Ukko s report from the fixed URL .
13,173
def get_nodes ( n = 8 , exclude = [ ] , loop = None ) : report = _get_ukko_report ( ) nodes = _parse_ukko_report ( report ) ret = [ ] while len ( ret ) < n and len ( nodes ) > 0 : node = nodes [ 0 ] if node not in exclude : reachable = True if loop is not None : reachable = loop . run_until_complete ( _test_node ( node ) ) if reachable : ret . append ( node ) nodes = nodes [ 1 : ] return ret
Get Ukko nodes with the least amount of load .
13,174
def get_unix_ioctl_terminal_size ( ) : def ioctl_gwinsz ( fd ) : try : import fcntl import termios import struct return struct . unpack ( 'hh' , fcntl . ioctl ( fd , termios . TIOCGWINSZ , '1234' ) ) except ( IOError , OSError ) : return None cr = ioctl_gwinsz ( 0 ) or ioctl_gwinsz ( 1 ) or ioctl_gwinsz ( 2 ) if not cr : try : f = open ( os . ctermid ( ) ) cr = ioctl_gwinsz ( f . fileno ( ) ) f . close ( ) except ( IOError , OSError ) : pass if not cr : try : cr = ( os . environ [ 'LINES' ] , os . environ [ 'COLUMNS' ] ) except KeyError : return None return int ( cr [ 1 ] ) , int ( cr [ 0 ] )
Get the terminal size of a UNIX terminal using the ioctl UNIX command .
13,175
def add_marker_to_qtl ( qtl , map_list ) :
    """Return the name of the marker closest to the given QTL peak.

    Rows are sequences where index 0 is the name, index 1 the linkage
    group and index 2 the position. Returns '' when no marker shares the
    QTL's linkage group; ties keep the first marker encountered.
    """
    best_marker = ''
    best_delta = None
    for marker in map_list :
        if marker [ 1 ] != qtl [ 1 ] :
            continue  # only compare markers on the same linkage group
        delta = float ( qtl [ 2 ] ) - float ( marker [ 2 ] )
        if best_delta is None or abs ( delta ) < abs ( best_delta ) :
            best_delta = delta
            best_marker = marker
    return best_marker [ 0 ] if best_marker != '' else best_marker
Add the closest marker to the given QTL .
13,176
def add_marker_to_qtls ( qtlfile , mapfile , outputfile = 'qtls_with_mk.csv' ) : qtl_list = read_input_file ( qtlfile , ',' ) map_list = read_input_file ( mapfile , ',' ) if not qtl_list or not map_list : return qtl_list [ 0 ] . append ( 'Closest marker' ) qtls = [ ] qtls . append ( qtl_list [ 0 ] ) for qtl in qtl_list [ 1 : ] : qtl . append ( add_marker_to_qtl ( qtl , map_list ) ) qtls . append ( qtl ) LOG . info ( '- %s QTLs processed in %s' % ( len ( qtls ) , qtlfile ) ) write_matrix ( outputfile , qtls )
This function adds to a list of QTLs the closest marker to the QTL peak .
13,177
def addFilter ( self , filterMethod = FILTER_METHOD_AND , ** kwargs ) : filterMethod = filterMethod . upper ( ) if filterMethod not in FILTER_METHODS : raise ValueError ( 'Unknown filter method, %s. Must be one of: %s' % ( str ( filterMethod ) , repr ( FILTER_METHODS ) ) ) self . filters . append ( ( filterMethod , kwargs ) )
addFilter - Add a filter to this query .
13,178
def execute ( self , lst ) : from . import QueryableListMixed if not issubclass ( lst . __class__ , QueryableListBase ) : lst = QueryableListMixed ( lst ) filters = copy . copy ( self . filters ) nextFilter = filters . popleft ( ) while nextFilter : ( filterMethod , filterArgs ) = nextFilter lst = self . _applyFilter ( lst , filterMethod , filterArgs ) if len ( lst ) == 0 : return lst try : nextFilter = filters . popleft ( ) except : break return lst
execute - Execute the series of filters in order on the provided list .
13,179
def copy ( self ) :
    """copy - Return a new QueryBuilder with a shallow copy of the filters."""
    duplicate = QueryBuilder ( )
    # Shallow-copy so appending filters to one builder does not affect the other.
    duplicate . filters = copy . copy ( self . filters )
    return duplicate
copy - Create a copy of this query .
13,180
def _applyFilter ( lst , filterMethod , filterArgs ) : if filterMethod == FILTER_METHOD_AND : return lst . filterAnd ( ** filterArgs ) else : return lst . filterOr ( ** filterArgs )
_applyFilter - Applies the given filter method on a set of args
13,181
def _raise_corsair_error ( self , error = None , message = "" ) : if error is None : error = self . last_error ( ) raise error ( message )
Raise error message based on the last reported error from the SDK
13,182
def device_count ( self ) : device_count = get_device_count ( self . corsair_sdk ) if device_count == - 1 : self . _raise_corsair_error ( ) return device_count
Find amount of CUE devices
13,183
def led_id_from_char ( self , char ) : led_id = get_led_id_for_key_name ( self . corsair_sdk , bytes ( char ) ) if led_id == 0 : self . _raise_corsair_error ( ) return led_id
Get id of a led by the letter Only between A - Z
13,184
def set_led ( self , led_id , color ) : if not set_leds_color ( self . corsair_sdk , LedColor ( led_id , * color ) ) : self . _raise_corsair_error ( ) return True
Set color of an led
13,185
def request_control ( self , device_id , access_mode = True ) : if access_mode : if not request_control ( self . corsair_sdk , device_id ) : self . _raise_corsair_error ( ) return True else : self . reload ( )
Request exclusive control of device
13,186
def device ( self , device_id , * args , ** kwargs ) : return Device ( device_id , self . corsair_sdk , self . _corsair_sdk_path , * args , ** kwargs )
Return a Device object based on id
13,187
def device_info ( self , device_id = None ) : if device_id is None : device_id = self . device_id return get_device_info ( self . corsair_sdk , device_id )
Return device information if device_id is not specified return for this device
13,188
def get_obsolete_acc_to_uniparc ( acc ) : contents = http_get ( 'www.uniprot.org/uniparc/?query={0}' . format ( acc ) ) mtchs = re . findall ( r'"UPI[A-Z0-9]+?"' , contents , re . DOTALL ) uniparc_id = set ( [ m [ 1 : - 1 ] for m in mtchs ] ) if len ( uniparc_id ) == 1 : return uniparc_id . pop ( ) elif len ( uniparc_id ) > 1 : raise Exception ( 'Multiple UPI identifiers found.' ) return None
Tries to determine the UniParc ID for obsolete ACCs which are not returned using uniprot_map .
13,189
def get_common_PDB_IDs ( pdb_id , cache_dir = None , exception_on_failure = True ) : m = pdb_to_uniparc ( [ pdb_id ] , cache_dir = cache_dir ) UniProtACs = [ ] if pdb_id in m : for entry in m [ pdb_id ] : if entry . UniProtACs : UniProtACs . extend ( entry . UniProtACs ) elif exception_on_failure : raise Exception ( 'No UniProtAC for one entry.Lookup failed.' ) elif exception_on_failure : raise Exception ( 'Lookup failed.' ) if not UniProtACs : if exception_on_failure : raise Exception ( 'Lookup failed.' ) else : return None common_set = set ( uniprot_map ( 'ACC' , 'PDB_ID' , [ UniProtACs [ 0 ] ] , cache_dir = cache_dir ) . get ( UniProtACs [ 0 ] , [ ] ) ) for acc in UniProtACs [ 1 : ] : common_set = common_set . intersection ( set ( uniprot_map ( 'ACC' , 'PDB_ID' , [ acc ] , cache_dir = cache_dir ) . get ( acc , [ ] ) ) ) return sorted ( common_set )
This function takes a PDB ID maps it to UniProt ACCs then returns the common set of PDB IDs related to those ACCs . The purpose is to find any PDB files related to pdb_id particularly for complexes such that the other PDB files contain identical sequences or mutant complexes .
13,190
def _parse_sequence_tag ( self ) : entry_tag = self . entry_tag sequence_tags = [ child for child in entry_tag . childNodes if child . nodeType == child . ELEMENT_NODE and child . tagName == 'sequence' ] assert ( len ( sequence_tags ) == 1 ) sequence_tag = sequence_tags [ 0 ] self . atomic_mass = float ( sequence_tag . getAttribute ( "mass" ) ) self . sequence = "" . join ( sequence_tag . firstChild . nodeValue . strip ( ) . split ( "\n" ) ) self . sequence_length = int ( sequence_tag . getAttribute ( "length" ) ) self . CRC64Digest = sequence_tag . getAttribute ( "checksum" )
Parses the sequence and atomic mass .
13,191
def cool_paginate ( context , ** kwargs ) -> dict : names = ( 'size' , 'next_name' , 'previous_name' , 'elastic' , 'page_obj' , ) return_dict = { name : value for name , value in zip ( names , map ( kwargs . get , names ) ) } if context . get ( 'request' ) : return_dict [ 'request' ] = context [ 'request' ] else : raise RequestNotExists ( 'Unable to find request in your template context,' 'please make sure that you have the request context processor enabled' ) if not return_dict . get ( 'page_obj' ) : if context . get ( 'page_obj' ) : return_dict [ 'page_obj' ] = context [ 'page_obj' ] else : raise PageNotSpecified ( 'You customized paginator standard name, ' "but haven't specified it in {% cool_paginate %} tag." ) if not return_dict . get ( 'elastic' ) : return_dict [ 'elastic' ] = getattr ( settings , 'COOL_PAGINATOR_ELASTIC' , 10 ) return return_dict
Main function for pagination process .
13,192
def time_restarts ( data_path ) : path = os . path . join ( data_path , 'last_restarted' ) if not os . path . isfile ( path ) : with open ( path , 'a' ) : os . utime ( path , None ) last_modified = os . stat ( path ) . st_mtime with open ( path , 'a' ) : os . utime ( path , None ) now = os . stat ( path ) . st_mtime dif = round ( now - last_modified , 2 ) last_restart = datetime . fromtimestamp ( now ) . strftime ( '%H:%M:%S' ) result = 'LAST RESTART WAS {} SECONDS AGO at {}' . format ( dif , last_restart ) print ( style ( fg = 'green' , bg = 'red' , text = result ) )
When called will create a file and measure its mtime on restarts
13,193
def from_stmt ( stmt , engine , ** kwargs ) :
    """Execute *stmt* on *engine* and build the result from its DB cursor."""
    cursor = engine . execute ( stmt , ** kwargs ) . cursor
    return from_db_cursor ( cursor )
Execute a query in the form of a text clause, and return the result in form of
13,194
def compute_stability_classification(self, predicted_data, record, dataframe_record):
    """Calculate the stability classification for this case.

    Uses the fraction_correct metric with the class's x/y cutoffs to decide
    whether the prediction classifies the experimental DDG correctly, and
    stores 0/1 — or None when no experimental DDG exists — in
    ``dataframe_record['StabilityClassification']``.
    """
    stability_classification = None
    x_cutoff = self.stability_classication_x_cutoff
    y_cutoff = self.stability_classication_y_cutoff
    # Idiom fix: compare against None with "is not" rather than "!=".
    if record['DDG'] is not None:
        stability_classification = fraction_correct(
            [record['DDG']],
            [predicted_data[self.ddg_analysis_type]],
            x_cutoff=x_cutoff,
            y_cutoff=y_cutoff)
        stability_classification = int(stability_classification)
        # For a single case the metric must be exactly 0 or 1.
        assert(stability_classification == 0 or stability_classification == 1)
    dataframe_record['StabilityClassification'] = stability_classification
Calculate the stability classification for this case .
13,195
def compute_absolute_error(self, predicted_data, record, dataframe_record):
    """Calculate the absolute error for this case and store it in
    ``dataframe_record['AbsoluteError']``."""
    predicted = predicted_data[self.ddg_analysis_type]
    dataframe_record['AbsoluteError'] = abs(record['DDG'] - predicted)
Calculate the absolute error for this case .
13,196
def count_residues(self, record, pdb_record):
    """Count the number of residues in the mutated chain for this case.

    All mutations in a record are expected to lie on a single chain; its
    sequence length is returned (0 when the chain is absent).
    """
    mutations = self.get_record_mutations(record)
    chains = {m['Chain'] for m in mutations}
    assert(len(chains) == 1)
    chain_id = chains.pop()
    chain_sequence = pdb_record.get('Chains', {}).get(chain_id, {}).get('Sequence', '')
    return len(chain_sequence)
Count the number of residues in the chains for the case .
13,197
def full_analysis(self, analysis_set, output_directory, verbose=True, compile_pdf=True, quick_plots=False):
    """Convenience driver combining calculate_metrics, write_dataframe_to_csv and plot.

    Ensures ``output_directory`` exists, records it as the analysis
    directory, computes the metrics, writes the dataframe to ``data.csv``
    inside it, and returns the result of ``plot``.
    """
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
    self.analysis_directory = output_directory
    self.calculate_metrics(analysis_set=analysis_set,
                           analysis_directory=output_directory,
                           verbose=verbose)
    self.write_dataframe_to_csv(os.path.join(output_directory, 'data.csv'))
    return self.plot(analysis_set=analysis_set,
                     analysis_directory=output_directory,
                     matplotlib_plots=True,
                     verbose=verbose,
                     compile_pdf=compile_pdf,
                     quick_plots=quick_plots)
Combines calculate_metrics, write_dataframe_to_csv, and plot into a single call.
13,198
def get_unique_ajps(benchmark_runs):
    """Determine which additional join parameters distinguish the given
    benchmark runs, i.e. take more than one distinct short_name."""
    short_names_by_param = {}
    for run in benchmark_runs:
        for param_name, param in run.additional_join_parameters.items():
            short_names_by_param.setdefault(param_name, set()).add(param['short_name'])
    # A parameter is "unique" when its short names differ between runs.
    return [name for name, short_names in short_names_by_param.items()
            if len(short_names) > 1]
Determines which join parameters are unique
13,199
def plot_optimum_prediction_fraction_correct_cutoffs_over_range(self, analysis_set, min_stability_classication_x_cutoff, max_stability_classication_x_cutoff, suppress_plot=False, analysis_file_prefix=None, verbose=True):
    # Sweeps experimental (x) cutoffs from min to max in 0.1 steps, finding at each the
    # prediction cutoff that maximises the fraction-correct metric; optionally writes a
    # CSV of (experimental cutoff, best prediction cutoff) pairs and an R-generated plot.
    # Returns (average scalar, plot filename).
    #
    # NOTE(review): this block appears truncated by extraction -- `r_script` is used
    # below before any visible assignment; the original presumably defined the R plotting
    # template string before the `_runRScript` call. Confirm against the original file.
    analysis_set_prefix = ''
    plot_filename = None
    if not suppress_plot:
        # File names for the plot image and the accompanying CSV.
        output_filename_prefix = '{0}{1}optimum_fraction_correct_at_varying_kcal_mol'.format(analysis_file_prefix, analysis_set_prefix)
        plot_filename = output_filename_prefix + '.png'
        csv_filename = output_filename_prefix + '.txt'
    lines = ['ExperimentalCutoff,BestPredictionCutoff']
    x_cutoff = min_stability_classication_x_cutoff
    x_values = []
    y_values = []
    avg_scale = 0
    plot_graph = self.generate_plots and not (suppress_plot)
    while x_cutoff < max_stability_classication_x_cutoff + 0.1:
        # Best prediction cutoff for this experimental cutoff.
        max_value_cutoff, max_value, fraction_correct_range = self.determine_optimum_fraction_correct_cutoffs(analysis_set, self.dataframe, x_cutoff)
        if plot_graph:
            lines.append(','.join(map(str, (x_cutoff, max_value_cutoff))))
        x_values.append(x_cutoff)
        y_values.append(max_value_cutoff)
        avg_scale += max_value_cutoff / x_cutoff
        x_cutoff += 0.1
    if plot_graph:
        write_file(csv_filename, '\n'.join(lines))
    # Average ratio of best prediction cutoff to experimental cutoff over the sweep.
    avg_scale = avg_scale / len(x_values)
    x_values = numpy.array(x_values)
    y_values = numpy.array(y_values)
    scalars = y_values / x_values
    average_scalar = numpy.mean(scalars)
    plot_label_1 = 'Scalar == %0.2f' % average_scalar
    plot_label_2 = 'sigma == %0.2f' % numpy.std(scalars)
    if plot_graph:
        # Only (re)generate the plot if it is missing or regeneration was requested.
        if not (os.path.exists(plot_filename) and not (self.recreate_graphs)):
            if verbose:
                self.log('Saving scatterplot to %s.' % plot_filename)
                self.log('Saving plot of approximate optimal fraction correct cutoffs over varying experimental cutoffs to %s.' % plot_filename)
            title = 'Optimum cutoff for fraction correct metric at varying experimental cutoffs'
            if analysis_set:
                title += ' for {0}'.format(analysis_set)
            # NOTE(review): `r_script` has no visible definition above -- TODO confirm the
            # missing R template before relying on this code path.
            r_script = RInterface._runRScript(r_script % locals())
    return average_scalar, plot_filename
Plots the optimum cutoff for the predictions to maximize the fraction correct metric over a range of experimental cutoffs . Returns the average scalar corresponding to the best value of fraction correct over a range of cutoff values for the experimental cutoffs .