idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
13,700
def complete_delivery_note(self, delivery_note_id, complete_dict):
    """Mark a delivery note as completed via a PUT request."""
    return self._create_put_request(
        resource=DELIVERY_NOTES,
        billomat_id=delivery_note_id,
        command=COMPLETE,
        send_data=complete_dict,
    )
Completes a delivery note
13,701
def delivery_note_pdf(self, delivery_note_id):
    """Fetch the PDF rendering of a delivery note."""
    return self._create_get_request(
        resource=DELIVERY_NOTES,
        billomat_id=delivery_note_id,
        command=PDF,
    )
Opens a pdf of a delivery note
13,702
def get_items_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """Fetch one page of items belonging to a delivery note."""
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_ITEMS,
        per_page=per_page,
        page=page,
        params={'delivery_note_id': delivery_note_id},
    )
Get items of delivery note per page
13,703
def get_all_items_of_delivery_note(self, delivery_note_id):
    """Fetch every item of a delivery note by walking all pages.

    If the API rate limit is exceeded while iterating, an exception is
    raised and no partial result is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_items_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_ITEMS,
        delivery_note_id=delivery_note_id,
    )
Get all items of delivery note This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,704
def update_delivery_note_item(self, delivery_note_item_id, delivery_note_item_dict):
    """Update a delivery note item via a PUT request."""
    return self._create_put_request(
        resource=DELIVERY_NOTE_ITEMS,
        billomat_id=delivery_note_item_id,
        send_data=delivery_note_item_dict,
    )
Updates a delivery note item
13,705
def get_comments_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """Fetch one page of comments belonging to a delivery note."""
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_COMMENTS,
        per_page=per_page,
        page=page,
        params={'delivery_note_id': delivery_note_id},
    )
Get comments of delivery note per page
13,706
def get_all_comments_of_delivery_note(self, delivery_note_id):
    """Fetch every comment of a delivery note by walking all pages.

    If the API rate limit is exceeded while iterating, an exception is
    raised and no partial result is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_comments_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_COMMENTS,
        delivery_note_id=delivery_note_id,
    )
Get all comments of delivery note This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,707
def update_delivery_note_comment(self, delivery_note_comment_id, delivery_note_comment_dict):
    """Update a delivery note comment via a PUT request."""
    return self._create_put_request(
        resource=DELIVERY_NOTE_COMMENTS,
        billomat_id=delivery_note_comment_id,
        send_data=delivery_note_comment_dict,
    )
Updates a delivery note comment
13,708
def get_tags_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """Fetch one page of tags belonging to a delivery note."""
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_TAGS,
        per_page=per_page,
        page=page,
        params={'delivery_note_id': delivery_note_id},
    )
Get tags of delivery note per page
13,709
def get_all_tags_of_delivery_note(self, delivery_note_id):
    """Fetch every tag of a delivery note by walking all pages.

    If the API rate limit is exceeded while iterating, an exception is
    raised and no partial result is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_tags_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_TAGS,
        delivery_note_id=delivery_note_id,
    )
Get all tags of delivery note This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,710
def get_letters_per_page(self, per_page=1000, page=1, params=None):
    """Fetch one page of letters, optionally filtered by *params*."""
    return self._get_resource_per_page(
        resource=LETTERS,
        per_page=per_page,
        page=page,
        params=params,
    )
Get letters per page
13,711
def get_all_letters(self, params=None):
    """Fetch every letter by walking all pages.

    If the API rate limit is exceeded while iterating, an exception is
    raised and no partial result is returned.
    """
    return self._iterate_through_pages(
        self.get_letters_per_page,
        resource=LETTERS,
        params=params or {},
    )
Get all letters This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,712
def update_letter(self, letter_id, letter_dict):
    """Update a letter via a PUT request."""
    return self._create_put_request(
        resource=LETTERS,
        billomat_id=letter_id,
        send_data=letter_dict,
    )
Updates a letter
13,713
def get_comments_of_letter_per_page(self, letter_id, per_page=1000, page=1):
    """Fetch one page of comments belonging to a letter."""
    return self._get_resource_per_page(
        resource=LETTER_COMMENTS,
        per_page=per_page,
        page=page,
        params={'letter_id': letter_id},
    )
Get comments of letter per page
13,714
def get_all_comments_of_letter(self, letter_id):
    """Fetch every comment of a letter by walking all pages.

    If the API rate limit is exceeded while iterating, an exception is
    raised and no partial result is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_comments_of_letter_per_page,
        resource=LETTER_COMMENTS,
        letter_id=letter_id,
    )
Get all comments of letter This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,715
def update_letter_comment(self, letter_comment_id, letter_comment_dict):
    """Update a letter comment via a PUT request."""
    return self._create_put_request(
        resource=LETTER_COMMENTS,
        billomat_id=letter_comment_id,
        send_data=letter_comment_dict,
    )
Updates a letter comment
13,716
def get_tags_of_letter_per_page(self, letter_id, per_page=1000, page=1):
    """Fetch one page of tags belonging to a letter."""
    return self._get_resource_per_page(
        resource=LETTER_TAGS,
        per_page=per_page,
        page=page,
        params={'letter_id': letter_id},
    )
Get tags of letter per page
13,717
def get_all_tags_of_letter(self, letter_id):
    """Fetch every tag of a letter by walking all pages.

    If the API rate limit is exceeded while iterating, an exception is
    raised and no partial result is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_tags_of_letter_per_page,
        resource=LETTER_TAGS,
        letter_id=letter_id,
    )
Get all tags of letter This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,718
def get_email_templates_per_page(self, per_page=1000, page=1, params=None):
    """Fetch one page of e-mail templates, optionally filtered by *params*."""
    return self._get_resource_per_page(
        resource=EMAIL_TEMPLATES,
        per_page=per_page,
        page=page,
        params=params,
    )
Get e - mail templates per page
13,719
def get_email_templates(self, params=None):
    """Fetch every e-mail template by walking all pages.

    If the API rate limit is exceeded while iterating, an exception is
    raised and no partial result is returned.
    """
    return self._iterate_through_pages(
        self.get_email_templates_per_page,
        resource=EMAIL_TEMPLATES,
        params=params or {},
    )
Get all e - mail templates This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,720
def update_email_template(self, template_id, template_dict):
    """Update an e-mail template via a PUT request."""
    return self._create_put_request(
        resource=EMAIL_TEMPLATES,
        billomat_id=template_id,
        send_data=template_dict,
    )
Updates an email template
13,721
def get_templates_per_page(self, per_page=1000, page=1, params=None):
    """Fetch one page of templates, optionally filtered by *params*."""
    return self._get_resource_per_page(
        resource=TEMPLATES,
        per_page=per_page,
        page=page,
        params=params,
    )
Get templates per page
13,722
def get_all_templates(self, params=None):
    """Fetch every template by walking all pages.

    If the API rate limit is exceeded while iterating, an exception is
    raised and no partial result is returned.
    """
    return self._iterate_through_pages(
        self.get_templates_per_page,
        resource=TEMPLATES,
        params=params or {},
    )
Get all templates This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,723
def update_template(self, template_id, template_dict):
    """Update a template via a PUT request."""
    return self._create_put_request(
        resource=TEMPLATES,
        billomat_id=template_id,
        send_data=template_dict,
    )
Updates a template
13,724
def reverse_translate(protein_seq, template_dna=None, leading_seq=None,
                      trailing_seq=None, forbidden_seqs=(), include_stop=True,
                      manufacturer=None):
    """Generate a well-behaved DNA sequence for the given protein sequence.

    When a template DNA sequence is given, the result stays as close to it
    as possible.  Named or literal restriction sites in *forbidden_seqs*
    are kept out of the result, and *leading_seq* / *trailing_seq* (either
    named restriction sites or literal sequences) are concatenated around
    the coding region.
    """
    if manufacturer == 'gen9':
        forbidden_seqs += gen9.reserved_restriction_sites
    # Named sites resolve through the lookup table; literals pass through.
    leading_seq = restriction_sites.get(leading_seq, leading_seq or '')
    trailing_seq = restriction_sites.get(trailing_seq, trailing_seq or '')
    codons = make_codon_list(protein_seq, template_dna, include_stop)
    sanitize_codon_list(codons, forbidden_seqs)
    dna_seq = leading_seq + ''.join(codons) + trailing_seq
    if manufacturer == 'gen9':
        gen9.apply_quality_control_checks(dna_seq)
    return dna_seq
Generate a well - behaved DNA sequence from the given protein sequence . If a template DNA sequence is specified the returned DNA sequence will be as similar to it as possible . Any given restriction sites will not be present in the sequence . And finally the given leading and trailing sequences will be appropriately concatenated .
13,725
def make_codon_list(protein_seq, template_dna=None, include_stop=True):
    """Return a list of codons that translate to *protein_seq*.

    Codons are picked first to minimize mutations relative to the
    template DNA sequence, then to prefer optimal codons.
    """
    codon_list = []
    if template_dna is None:
        template_dna = []
    for i, res in enumerate(protein_seq.upper()):
        # BUG FIX: slicing never raises IndexError, so the original
        # try/except was dead code and a too-short template produced a
        # partial codon instead of the '---' placeholder.
        template_codon = template_dna[3 * i:3 * i + 3]
        if len(template_codon) != 3:
            template_codon = '---'
        # Sort a copy: the original sorted the shared list stored in
        # dna.ecoli_reverse_translate in place (hidden global side effect).
        possible_codons = sorted(
            dna.ecoli_reverse_translate[res],
            key=lambda x: dna.num_mutations(x, template_codon))
        codon_list.append(possible_codons[0])
    stop_codons = dna.ecoli_reverse_translate['.']
    # Guard against an empty protein sequence (original raised IndexError).
    if include_stop and (not codon_list or codon_list[-1] not in stop_codons):
        codon_list.append(stop_codons[0])
    return codon_list
Return a list of codons that would be translated to the given protein sequence . Codons are picked first to minimize the mutations relative to a template DNA sequence and second to prefer optimal codons .
13,726
def sanitize_codon_list(codon_list, forbidden_seqs=()):
    """Make silent mutations to *codon_list* to purge undesirable sequences.

    Undesirable sequences are the given restriction sites (named or
    literal), their reverse complements, and over-long homopolymers.
    Returns the number of corrections made.  Raises ValueError if any
    codon does not have exactly 3 bases.
    """
    for codon in codon_list:
        if len(codon) != 3:
            raise ValueError(
                "Codons must have exactly 3 bases: '{}'".format(codon))
    bad_seqs = set()
    # BUG FIX: the original called set.union(), whose return value was
    # discarded, so bad_seqs stayed empty; update() mutates in place.
    bad_seqs.update(restriction_sites.get(seq, seq) for seq in forbidden_seqs)
    # Snapshot with list() — updating a set while iterating it raises.
    bad_seqs.update(dna.reverse_complement(seq) for seq in list(bad_seqs))
    bad_seqs.update(base * (gen9.homopolymer_max_lengths[base] + 1)
                    for base in dna.dna_bases)
    bad_seqs = [dna.dna_to_re(bs) for bs in bad_seqs]
    num_corrections = 0
    for bad_seq in bad_seqs:
        while remove_bad_sequence(codon_list, bad_seq, bad_seqs):
            num_corrections += 1
    return num_corrections
Make silent mutations to the given codon lists to remove any undesirable sequences that are present within it . Undesirable sequences include restriction sites which may be optionally specified as a second argument and homopolymers above a pre - defined length . The return value is the number of corrections made to the codon list .
13,727
def remove_bad_sequence(codon_list, bad_seq, bad_seqs):
    """Silently mutate *codon_list* to remove the first match of *bad_seq*.

    Returns False if *bad_seq* does not occur in the gene sequence (no
    change is made), True once a successful silent mutation is applied.
    Raises RuntimeError when no silent mutation can remove the match.
    Both *bad_seq* and the entries of *bad_seqs* are compiled regexes.
    """
    gene_seq = ''.join(codon_list)
    problem = bad_seq.search(gene_seq)
    if not problem:
        return False
    bs_start_codon = problem.start() // 3
    bs_end_codon = problem.end() // 3
    for i in range(bs_start_codon, bs_end_codon):
        problem_codon = codon_list[i]
        amino_acid = translate_dna(problem_codon)
        alternate_codons = [
            codon for codon in dna.ecoli_reverse_translate[amino_acid]
            if codon != problem_codon
        ]
        for alternate_codon in alternate_codons:
            codon_list[i] = alternate_codon
            if problem_with_codon(i, codon_list, bad_seqs):
                codon_list[i] = problem_codon  # revert: created a new clash
            else:
                return True
    # BUG FIX: the original formatted an undefined name ``bs`` here,
    # raising NameError instead of the intended RuntimeError.
    raise RuntimeError(
        "Could not remove bad sequence '{}' from gene.".format(bad_seq.pattern))
Make a silent mutation to the given codon list to remove the first instance of the given bad sequence found in the gene sequence. If the bad sequence isn't found, nothing happens and the function returns false. Otherwise the function returns true. You can use these return values to easily write a loop that totally purges the bad sequence from the codon list. Both the specific bad sequence in question and the list of all bad sequences are expected to be regular expressions.
13,728
def problem_with_codon(codon_index, codon_list, bad_seqs):
    """Return True if the codon at *codon_index* overlaps any bad sequence.

    *bad_seqs* is a list of compiled regular expressions.
    """
    first_base = 3 * codon_index
    last_base = first_base + 2
    gene_seq = ''.join(codon_list)
    for pattern in bad_seqs:
        hit = pattern.search(gene_seq)
        if hit and hit.start() < last_base and hit.end() > first_base:
            return True
    return False
Return true if the given codon overlaps with a bad sequence .
13,729
def sequences_from_fasta(path):
    """Extract multiple sequences from a FASTA file, keyed by description."""
    from Bio import SeqIO
    records = SeqIO.parse(path, 'fasta')
    return dict((record.description, record.seq) for record in records)
Extract multiple sequences from a FASTA file .
13,730
def write_sequences_to_fasta(path, seqs):
    """Create a FASTA file at *path* listing the given {id: sequence} mapping."""
    from Bio import SeqIO
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    path = Path(path)
    records = [
        SeqRecord(Seq(sequence), id=seq_id, description='')
        for seq_id, sequence in seqs.items()
    ]
    SeqIO.write(records, str(path), 'fasta')
Create a FASTA file listing the given sequences .
13,731
def write_sequences_to_xlsx(path, seqs):
    """Create an XLSX file at *path* listing the given {id: sequence} mapping."""
    from openpyxl import Workbook
    workbook = Workbook()
    sheet = workbook.active
    for row, seq_id in enumerate(seqs, 1):
        sheet.cell(row, 1).value = seq_id
        sheet.cell(row, 2).value = seqs[seq_id]
    workbook.save(path)
Create a XLSX file listing the given sequences .
13,732
def add_node(self, node):
    """Link *node* to a source (generation 0) or to a fitness-weighted parent.

    Generation-0 nodes receive a transmission from the oldest source;
    later nodes are attached to one member of the previous generation,
    chosen by roulette-wheel selection over fitness.
    """
    agents = [n for n in self.nodes() if not isinstance(n, Source)]
    num_agents = len(agents)
    curr_generation = int((num_agents - 1) / float(self.generation_size))
    node.generation = curr_generation
    if curr_generation == 0:
        if self.initial_source:
            source = min(self.nodes(type=Source),
                         key=attrgetter('creation_time'))
            source.connect(whom=node)
            source.transmit(to_whom=node)
    else:
        prev_agents = Node.query.filter_by(
            failed=False,
            network_id=self.id,
            generation=(curr_generation - 1),
        ).all()
        prev_fits = [p.fitness for p in prev_agents]
        prev_probs = [(f / (1.0 * sum(prev_fits))) for f in prev_fits]
        # Roulette-wheel selection by fitness.
        rnd = random.random()
        temp = 0.0
        # BUG FIX: with float rounding the cumulative sum can stay <= rnd
        # for every entry, leaving ``parent`` unbound; default to the last
        # agent so the selection always succeeds.
        parent = prev_agents[-1]
        for i, probability in enumerate(prev_probs):
            temp += probability
            if temp > rnd:
                parent = prev_agents[i]
                break
        parent.connect(whom=node)
        parent.transmit(to_whom=node)
Link the agent to a random member of the previous generation .
13,733
def add_node(self, node):
    """Attach a newcomer using linear preferential attachment.

    The first ``m0`` nodes are fully connected; afterwards each newcomer
    gains ``m`` bidirectional links, each chosen with probability
    proportional to the candidates' outgoing degree.
    """
    nodes = self.nodes()
    if len(nodes) <= self.m0:
        # Seed phase: fully connect the initial m0 nodes.
        for other in [n for n in nodes if n.id != node.id]:
            node.connect(direction="both", whom=other)
        return
    for _ in xrange(self.m):
        candidates = [
            n for n in nodes
            if n.id != node.id and not n.is_connected(direction="either",
                                                      whom=node)
        ]
        outdegrees = [len(n.vectors(direction="outgoing")) for n in candidates]
        ps = [(d / (1.0 * sum(outdegrees))) for d in outdegrees]
        rnd = random.random() * sum(ps)
        cur = 0.0
        for i, p in enumerate(ps):
            cur += p
            if rnd < cur:
                node.connect(direction="both", whom=candidates[i])
                # BUG FIX: without this break, every remaining candidate
                # past the threshold would also be connected, instead of
                # exactly one link per iteration.
                break
Add newcomers one by one using linear preferential attachment .
13,734
def docker_py_dict(self):
    """Convert this object to a dict of valid docker-py properties."""
    # docker-py key -> attribute name on self
    key_to_attr = {
        'image': 'image',
        'command': 'cmd',
        'hostname': 'hostname',
        'user': 'user',
        'detach': 'detach',
        'stdin_open': 'open_stdin',
        'tty': 'tty',
        'ports': 'exposed_ports',
        'environment': 'env',
        'volumes': 'volumes',
        'network_disabled': 'network_disabled',
        'entrypoint': 'entry_point',
        'working_dir': 'working_dir',
        'domainname': 'domain_name',
        'labels': 'labels',
    }
    return {key: getattr(self, attr) for key, attr in key_to_attr.items()}
Convert object to match valid docker - py properties .
13,735
def get_person_by_regid(self, regid):
    """Return a restclients.Person for *regid*.

    Raises InvalidRegID for a malformed regid and DataFailureException
    when the PWS request does not return 200.
    """
    if not self.valid_uwregid(regid):
        raise InvalidRegID(regid)
    url = "{}/{}/full.json".format(PERSON_PREFIX, regid.upper())
    headers = {"Accept": "application/json"}
    response = DAO.getURL(url, headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    return self._person_from_json(response.data)
Returns a restclients . Person object for the given regid . If the regid isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,736
def get_person_by_netid(self, netid):
    """Return a restclients.Person for *netid*.

    Raises InvalidNetID for a malformed netid and DataFailureException
    when the PWS request does not return 200.
    """
    if not self.valid_uwnetid(netid):
        raise InvalidNetID(netid)
    url = "{}/{}/full.json".format(PERSON_PREFIX, netid.lower())
    headers = {"Accept": "application/json"}
    response = DAO.getURL(url, headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    return self._person_from_json(response.data)
Returns a restclients . Person object for the given netid . If the netid isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,737
def get_person_by_employee_id(self, employee_id):
    """Return a restclients.Person for the given employee id.

    Raises InvalidEmployeeID for a malformed id and DataFailureException
    when the PWS request fails or no person matches.
    """
    if not self.valid_employee_id(employee_id):
        raise InvalidEmployeeID(employee_id)
    query = urlencode({"employee_id": employee_id})
    url = "{}.json?{}".format(PERSON_PREFIX, query)
    response = DAO.getURL(url, {"Accept": "application/json"})
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    data = json.loads(response.data)
    if not len(data["Persons"]):
        raise DataFailureException(url, 404, "No person found")
    regid = data["Persons"][0]["PersonURI"]["UWRegID"]
    # Resolve through the regid lookup for the full person record.
    return self.get_person_by_regid(regid)
Returns a restclients . Person object for the given employee id . If the employee id isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,738
def get_person_by_student_number(self, student_number):
    """Return a restclients.Person for the given student number.

    Raises InvalidStudentNumber for a malformed number and
    DataFailureException when the PWS request fails or no person matches.
    """
    if not self.valid_student_number(student_number):
        raise InvalidStudentNumber(student_number)
    query = urlencode({"student_number": student_number})
    url = "{}.json?{}".format(PERSON_PREFIX, query)
    response = DAO.getURL(url, {"Accept": "application/json"})
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    data = json.loads(response.data)
    if not len(data["Persons"]):
        raise DataFailureException(url, 404, "No person found")
    regid = data["Persons"][0]["PersonURI"]["UWRegID"]
    # Resolve through the regid lookup for the full person record.
    return self.get_person_by_regid(regid)
Returns a restclients . Person object for the given student number . If the student number isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,739
def get_person_by_prox_rfid(self, prox_rfid):
    """Return a restclients.Person for the given card rfid.

    Raises InvalidProxRFID for a malformed rfid and DataFailureException
    when the IdCard WS request fails or no card matches.
    """
    if not self.valid_prox_rfid(prox_rfid):
        raise InvalidProxRFID(prox_rfid)
    query = urlencode({"prox_rfid": prox_rfid})
    url = "{}.json?{}".format(CARD_PREFIX, query)
    response = DAO.getURL(url, {"Accept": "application/json"})
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    data = json.loads(response.data)
    if not len(data["Cards"]):
        raise DataFailureException(url, 404, "No card found")
    regid = data["Cards"][0]["RegID"]
    # Resolve through the regid lookup for the full person record.
    return self.get_person_by_regid(regid)
Returns a restclients . Person object for the given rfid . If the rfid isn t found or if there is an error communicating with the IdCard WS a DataFailureException will be thrown .
13,740
def get_entity_by_regid(self, regid):
    """Return a restclients.Entity for *regid*.

    Raises InvalidRegID for a malformed regid and DataFailureException
    when the PWS request does not return 200.
    """
    if not self.valid_uwregid(regid):
        raise InvalidRegID(regid)
    url = "{}/{}.json".format(ENTITY_PREFIX, regid.upper())
    headers = {"Accept": "application/json"}
    response = DAO.getURL(url, headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    return self._entity_from_json(response.data)
Returns a restclients . Entity object for the given regid . If the regid isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,741
def get_entity_by_netid(self, netid):
    """Return a restclients.Entity for *netid*.

    Raises InvalidNetID for a malformed netid and DataFailureException
    when the PWS request does not return 200.
    """
    if not self.valid_uwnetid(netid):
        raise InvalidNetID(netid)
    url = "{}/{}.json".format(ENTITY_PREFIX, netid.lower())
    headers = {"Accept": "application/json"}
    response = DAO.getURL(url, headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    return self._entity_from_json(response.data)
Returns a restclients . Entity object for the given netid . If the netid isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,742
def generate_identifier(sender, instance, **kwargs):
    """Pre-save hook: compute and assign the concept's identifier.

    Raises ValueError when a different concept with the same identifier
    and language already exists.
    """
    identifier = Concept.create_identifier(instance.query)
    conflicts = Concept.objects.filter(identifier=identifier,
                                       lang=instance.lang)
    if instance.pk:
        # Ignore the instance itself when checking for duplicates.
        conflicts = conflicts.exclude(pk=instance.pk)
    if conflicts.count() > 0:
        raise ValueError("Concept identifier conflict")
    instance.identifier = identifier
Generate and set identifier of concept before saving object to DB
13,743
def get_concept_item_mapping(self, concepts=None, lang=None):
    """Return a dict mapping concept pk -> list of items in the concept.

    When *concepts* is omitted, all active concepts (optionally filtered
    by *lang*) are used.  When *lang* is omitted it is inferred from the
    concepts; mixed-language input raises an exception.
    """
    if concepts is None:
        concepts = self.filter(active=True)
        if lang is not None:
            concepts = concepts.filter(lang=lang)
    if lang is None:
        languages = set([concept.lang for concept in concepts])
        if len(languages) > 1:
            raise Exception('Concepts has multiple languages')
        lang = list(languages)[0]
    queries = [json.loads(concept.query) for concept in concepts]
    item_lists = Item.objects.filter_all_reachable_leaves_many(queries, lang)
    return dict(zip([concept.pk for concept in concepts], item_lists))
Get mapping of concepts to items belonging to concept .
13,744
def get_item_concept_mapping(self, lang):
    """Invert the concept->items mapping: item id -> concepts containing it."""
    active_concepts = self.filter(active=True, lang=lang)
    concept_items = Concept.objects.get_concept_item_mapping(active_concepts, lang)
    return group_keys_by_value_lists(concept_items)
Get mapping of items_ids to concepts containing these items
13,745
def get_concepts_to_recalculate(self, users, lang, concepts=None):
    """Return stale concepts that must be recalculated, per user.

    A concept is stale for a user when the user's latest answer on any of
    its items is newer than the cached stat, or the cached stat has
    expired.  Returns a set for a single user, else a dict user -> set.
    """
    only_one_user = False
    if not isinstance(users, list):
        only_one_user = True
        users = [users]
    mapping = self.get_item_concept_mapping(lang)
    current_user_stats = defaultdict(lambda: {})
    user_stats_qs = UserStat.objects.filter(user__in=users, stat="answer_count")
    if concepts is not None:
        user_stats_qs = user_stats_qs.filter(concept__in=concepts)
    for user_stat in user_stats_qs:
        current_user_stats[user_stat.user_id][user_stat.concept_id] = user_stat
    concepts_to_recalculate = defaultdict(set)
    for user, item, time in Answer.objects.filter(user__in=users) \
            .values_list("user_id", "item").annotate(Max("time")):
        if item not in mapping:
            # Answers to items that do not belong to any concept.
            continue
        time_expiration_lower_bound = get_config(
            'proso_models', 'knowledge_overview.time_shift_hours', default=4)
        time_expiration_factor = get_config(
            'proso_models', 'knowledge_overview.time_expiration_factor', default=2)
        for concept in mapping[item]:
            if user in current_user_stats and concept in current_user_stats[user] \
                    and current_user_stats[user][concept].time > time:
                if not self.has_time_expired(
                        current_user_stats[user][concept].time, time,
                        time_expiration_lower_bound, time_expiration_factor):
                    continue
            # BUG FIX: the original fell back to ``else Concept`` which
            # tested membership against the Concept class itself (a
            # TypeError at runtime); compare against the given concepts
            # (either Concept instances or pks) instead.
            if concepts is None or concept in (
                    [c.pk for c in concepts]
                    if isinstance(concepts[0], Concept) else concepts):
                concepts_to_recalculate[user].add(concept)
    if only_one_user:
        return concepts_to_recalculate[users[0]]
    return concepts_to_recalculate
Get concepts which have changes and have to be recalculated
13,746
def recalculate_concepts(self, concepts, lang=None):
    """Recalculate cached per-concept statistics for the given users.

    *concepts* maps user id -> iterable of concept ids.  For each user
    the old UserStat rows are deleted and fresh ones are bulk-created.
    """
    if len(concepts) == 0:
        return
    if lang is None:
        items = Concept.objects.get_concept_item_mapping(
            concepts=Concept.objects.filter(
                pk__in=set(flatten(concepts.values()))))
    else:
        items = Concept.objects.get_concept_item_mapping(lang=lang)
    environment = get_environment()
    mastery_threshold = get_mastery_trashold()
    for user, user_concepts in concepts.items():
        all_items = list(set(flatten([items[c] for c in user_concepts])))
        answer_counts = environment.number_of_answers_more_items(all_items, user)
        correct_answer_counts = environment.number_of_correct_answers_more_items(
            all_items, user)
        predictions = dict(zip(all_items, get_predictive_model().predict_more_items(
            environment, user, all_items,
            time=get_time_for_knowledge_overview())))
        new_user_stats = []
        stats_to_delete_condition = Q()
        for concept in user_concepts:
            concept_items = items[concept]
            answer_aggregates = Answer.objects.filter(
                user=user, item__in=concept_items).aggregate(
                time_spent=Sum("response_time"),
                sessions=Count("session", True),
                time_first=Min("time"),
                time_last=Max("time"),
            )
            stats = {
                "answer_count": sum(answer_counts[i] for i in concept_items),
                "correct_answer_count": sum(correct_answer_counts[i]
                                            for i in concept_items),
                "item_count": len(concept_items),
                "practiced_items_count": sum([answer_counts[i] > 0
                                              for i in concept_items]),
                "mastered_items_count": sum([predictions[i] >= mastery_threshold
                                             for i in concept_items]),
                "prediction": sum([predictions[i] for i in concept_items]) / len(concept_items),
                "time_spent": answer_aggregates["time_spent"] / 1000,
                "session_count": answer_aggregates["sessions"],
                "time_first": answer_aggregates["time_first"].timestamp(),
                "time_last": answer_aggregates["time_last"].timestamp(),
            }
            stats_to_delete_condition |= Q(user=user, concept=concept)
            for stat_name, value in stats.items():
                new_user_stats.append(UserStat(
                    user_id=user, concept_id=concept,
                    stat=stat_name, value=value))
        # Replace this user's cached stats atomically per call.
        self.filter(stats_to_delete_condition).delete()
        self.bulk_create(new_user_stats)
Recalculate given concepts for given users
13,747
def get_user_stats(self, users, lang=None, concepts=None, since=None,
                   recalculate=True):
    """Find all UserStats of the given concepts and users.

    Stale stats are recalculated first when *recalculate* is True (which
    requires *lang*).  Returns user id -> concept identifier -> stat
    name -> value; for a single user input, the outer level is dropped.
    """
    only_one_user = False
    if not isinstance(users, list):
        users = [users]
        only_one_user = True
    if recalculate:
        if lang is None:
            raise ValueError('Recalculation without lang is not supported.')
        time_start = time_lib()
        concepts_to_recalculate = Concept.objects.get_concepts_to_recalculate(
            users, lang, concepts)
        LOGGER.debug("user_stats - getting identifying concepts to recalculate: %ss",
                     (time_lib() - time_start))
        time_start = time_lib()
        self.recalculate_concepts(concepts_to_recalculate, lang)
        LOGGER.debug("user_stats - recalculating concepts: %ss",
                     (time_lib() - time_start))
    qs = self.prepare_related().filter(user__in=users, concept__active=True)
    if concepts is not None:
        qs = qs.filter(concept__in=concepts)
    if lang is not None:
        qs = qs.filter(concept__lang=lang)
    if since is not None:
        qs = qs.filter(time__gte=since)
    data = defaultdict(lambda: defaultdict(lambda: {}))
    for user_stat in qs:
        data[user_stat.user_id][user_stat.concept.identifier][user_stat.stat] = user_stat.value
    if only_one_user:
        return data[users[0].pk if type(users[0]) == User else users[0]]
    return data
Finds all UserStats of given concepts and users . Recompute UserStats if necessary
13,748
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
    """Run ``execute`` with the table lock taken.

    Lock-happy by design; SQL performance is not currently a concern
    daemon-side.
    """
    return self.execute(sql, parameters=parameters, cursorClass=cursorClass,
                        quiet=quiet, locked=True)
We are lock - happy here but SQL performance is not currently an issue daemon - side .
13,749
def execute ( self , sql , parameters = None , cursorClass = DictCursor , quiet = False , locked = False , do_commit = True ) : i = 0 errcode = 0 caughte = None cursor = None if sql . find ( ";" ) != - 1 or sql . find ( "\\G" ) != - 1 : raise Exception ( "The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql ) while i < self . numTries : i += 1 try : assert ( not ( self . connection ) or not ( self . connection . open ) ) self . _get_connection ( cursorClass ) cursor = self . connection . cursor ( ) if locked : cursor . execute ( self . lockstring ) self . locked = True if parameters : errcode = cursor . execute ( sql , parameters ) else : errcode = cursor . execute ( sql ) self . lastrowid = int ( cursor . lastrowid ) if do_commit and self . isInnoDB : self . connection . commit ( ) results = cursor . fetchall ( ) if locked : cursor . execute ( self . unlockstring ) self . locked = False cursor . close ( ) self . _close_connection ( ) return results except MySQLdb . OperationalError , e : if cursor : if self . locked : cursor . execute ( self . unlockstring ) self . locked = False cursor . close ( ) self . _close_connection ( ) caughte = str ( e ) errcode = e [ 0 ] continue except Exception , e : if cursor : if self . locked : cursor . execute ( self . unlockstring ) self . locked = False cursor . close ( ) self . _close_connection ( ) caughte = str ( e ) traceback . print_exc ( ) break sleep ( 0.2 ) if not quiet : sys . stderr . write ( "\nSQL execution error in query %s at %s:" % ( sql , datetime . now ( ) . strftime ( "%Y-%m-%d %H:%M:%S" ) ) ) sys . stderr . write ( "\nErrorcode/Error: %d - '%s'.\n" % ( errcode , str ( caughte ) ) ) sys . stderr . flush ( ) raise MySQLdb . OperationalError ( caughte )
Execute SQL query . This uses DictCursor by default .
13,750
def insertDict ( self , tblname , d , fields = None ) : if fields == None : fields = sorted ( d . keys ( ) ) values = None try : SQL = 'INSERT INTO %s (%s) VALUES (%s)' % ( tblname , join ( fields , ", " ) , join ( [ '%s' for x in range ( len ( fields ) ) ] , ',' ) ) values = tuple ( [ d [ k ] for k in fields ] ) self . locked_execute ( SQL , parameters = values ) except Exception , e : if SQL and values : sys . stderr . write ( "\nSQL execution error in query '%s' %% %s at %s:" % ( SQL , values , datetime . now ( ) . strftime ( "%Y-%m-%d %H:%M:%S" ) ) ) sys . stderr . write ( "\nError: '%s'.\n" % ( str ( e ) ) ) sys . stderr . flush ( ) raise Exception ( "Error occurred during database insertion: '%s'." % str ( e ) )
Simple function for inserting a dictionary whose keys match the fieldnames of tblname .
13,751
def callproc ( self , procname , parameters = ( ) , cursorClass = DictCursor , quiet = False ) : i = 0 errcode = 0 caughte = None while i < self . numTries : i += 1 try : cursor = self . connection . cursor ( cursorClass ) if type ( parameters ) != type ( ( ) ) : parameters = ( parameters , ) errcode = cursor . callproc ( procname , parameters ) results = cursor . fetchall ( ) self . lastrowid = int ( cursor . lastrowid ) cursor . close ( ) return results except MySQLdb . OperationalError , e : errcode = e [ 0 ] self . connection . ping ( ) caughte = e continue except : traceback . print_exc ( ) break if not quiet : sys . stderr . write ( "\nSQL execution error call stored procedure %s at %s:" % ( procname , datetime . now ( ) . strftime ( "%Y-%m-%d %H:%M:%S" ) ) ) sys . stderr . write ( "\nErrorcode/Error: %d - '%s'.\n" % ( errcode , str ( caughte ) ) ) sys . stderr . flush ( ) raise MySQLdb . OperationalError ( caughte )
Calls a MySQL stored procedure procname . This uses DictCursor by default .
13,752
def execQuery ( self , sql , parameters = None , cursorClass = MySQLdb . cursors . Cursor , InnoDB = False ) : i = 0 errcode = 0 caughte = None while i < self . numTries : i += 1 try : cursor = self . connection . cursor ( cursorClass ) if parameters : errcode = cursor . execute ( sql , parameters ) else : errcode = cursor . execute ( sql ) if InnoDB : self . connection . commit ( ) results = cursor . fetchall ( ) self . lastrowid = int ( cursor . lastrowid ) cursor . close ( ) return results except MySQLdb . OperationalError , e : errcode = e [ 0 ] print ( e ) self . connection . ping ( True ) caughte = e continue except : traceback . print_exc ( ) break sys . stderr . write ( "\nSQL execution error in query at %s:" % datetime . now ( ) . strftime ( "%Y-%m-%d %H:%M:%S" ) ) sys . stderr . write ( "\n %s." % sql ) sys . stderr . flush ( ) sys . stderr . write ( "\nErrorcode: '%s'.\n" % ( str ( caughte ) ) ) sys . stderr . flush ( ) raise MySQLdb . OperationalError ( caughte )
Execute SQL query .
13,753
def _getFieldsInDB ( self , tablename ) : SQL = 'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME="%s"' % tablename array_data = self . execQuery ( SQL ) return [ x [ 0 ] for x in array_data ]
Get all the field (column) names of a specific table from the database's INFORMATION_SCHEMA.
13,754
def _is_ascii_stl ( first_bytes ) : is_ascii = False if 'solid' in first_bytes . decode ( "utf-8" ) . lower ( ) : is_ascii = True return is_ascii
Determine if this is an ASCII based data stream simply by checking the bytes for the word solid .
13,755
def _is_binary_stl ( data ) : is_bin = False start_byte = 0 end_byte = 80 _ = data [ start_byte : end_byte ] start_byte = end_byte end_byte += 4 facet_count = struct . unpack ( 'I' , data [ start_byte : end_byte ] ) [ 0 ] if facet_count > 0 : is_bin = True return is_bin
Determine if this is a binary file through unpacking the first value after the 80th character and testing whether this value is greater than zero . This indicates the number of facets in the file . Could possibly extend this to check that the remaining number of bytes is divisible by 50 .
13,756
def process_bind_param(self, obj, dialect):
    """Serialize a flask_cloudy.Object to a plain dict before DB binding.

    Only the attributes named in self.DEFAULT_KEYS are kept; None becomes
    an empty dict, and any other value is passed through unchanged.
    """
    value = obj or {}
    if isinstance(obj, flask_cloudy.Object):
        value = {}
        for k in self.DEFAULT_KEYS:
            value[k] = getattr(obj, k)
    # NOTE(review): super(self.__class__, self) recurses infinitely if this
    # class is subclassed further -- presumably safe as a leaf class; confirm.
    return super(self.__class__, self).process_bind_param(value, dialect)
Get a flask_cloudy . Object and save it as a dict
13,757
def create_tasks(task_coro, addrs, *args, flatten=True, **kwargs):
    """Schedule *task_coro* once per agent address and return the awaitable
    produced by wait_tasks over the scheduled futures.

    Extra positional and keyword arguments are forwarded to each call of
    *task_coro* after the address.
    """
    futures = [asyncio.ensure_future(task_coro(addr, *args, **kwargs)) for addr in addrs]
    return wait_tasks(futures, flatten)
Create and schedule a set of asynchronous tasks .
13,758
async def wait_tasks(tasks, flatten=True):
    """Await all *tasks* and return their results.

    When *flatten* is True and every result is iterable, the per-task
    results are chained into one flat list.
    """
    results = await asyncio.gather(*tasks)
    every_result_iterable = all(hasattr(r, '__iter__') for r in results)
    if flatten and every_result_iterable:
        results = list(itertools.chain.from_iterable(results))
    return results
Gather a list of asynchronous tasks and wait their completion .
13,759
def split_addrs(addrs):
    """Group addresses into nested dicts keyed first by host, then by port.

    Returns {host: {port: [addr, ...]}} preserving input order within
    each bucket.
    """
    grouped = {}
    for addr in addrs:
        host, port, _ = _addr_key(addr)
        grouped.setdefault(host, {}).setdefault(port, []).append(addr)
    return grouped
Split addresses into dictionaries by hosts and ports .
13,760
def addrs2managers(addrs):
    """Group agent addresses by the manager address assumed to own them.

    Returns {manager_addr: [agent_addr, ...]} using get_manager() to
    resolve each agent's manager.
    """
    by_manager = {}
    for addr in addrs:
        by_manager.setdefault(get_manager(addr), []).append(addr)
    return by_manager
Map agent addresses to their assumed managers .
13,761
def create_jinja_env(template_path):
    """Create a Jinja2 environment loading templates from *template_path*.

    Uses '${...}' instead of the default '{{...}}' for variables (blocks
    and comments keep the standard delimiters), trims/lstrips blocks, and
    registers the 'regexreplace' filter plus the uuidgen() global helper.
    """
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_path),
        block_start_string='{%',
        block_end_string='%}',
        variable_start_string='${',
        variable_end_string='}',
        comment_start_string='{#',
        comment_end_string='#}',
        line_statement_prefix=None,
        line_comment_prefix=None,
        trim_blocks=True,
        lstrip_blocks=True,
        newline_sequence='\n')
    jinja_env.filters['regexreplace'] = regex_replace
    jinja_env.globals.update(uuidgen=uuidgen)
    return jinja_env
Creates a Jinja2 environment with a specific template path .
13,762
def _debug_info(self):
    """Print the current session/configuration state for debugging.

    Dumps the working directory, HTTP session cookies and headers, and
    the config/custom/account dictionaries via the _msg helpers.
    """
    self._msg('DEBUG')
    self._msg2('WorkDir: {0}'.format(self._curdir))
    self._msg2('Cookies: {0}'.format(self._session.cookies))
    self._msg2('Headers: {0}'.format(self._session.headers))
    self._msg2('Configs: {0}'.format(self._config))
    self._msg2('Customs: {0}'.format(self._custom))
    self._msg2('Account: {0}'.format(self._account))
Show a list of recently variables info .
13,763
def register(self, argtypes=r'M', help_msg=None):
    """Decorator factory: register a method as a named shell command.

    *argtypes* describes the expected positional arguments, one letter
    each: 'm'/'M' pass-through, 'i'/'I' coerce to int, 's'/'S' coerce to
    str, 'h'/'H' optional placeholder (may be omitted by the caller).
    The wrapped method is stored in self._commands under its name,
    together with *help_msg*.
    """
    def format_args(method):
        def wrapped_method(*args, **kwargs):
            args_count = len(args)
            argtypes_count = len(argtypes)
            # Placeholders ('h'/'H') are optional, so the accepted argument
            # count becomes a range rather than an exact number.
            placeholder_count = argtypes.count('H') + argtypes.count('h')
            if placeholder_count:
                min_args_count = (argtypes_count - placeholder_count)
                if args_count < min_args_count or args_count > argtypes_count:
                    raise KngetError("args count is invalid.", reason='args count is {0}'.format(args_count))
            elif args_count != argtypes_count:
                raise KngetError("args count is invalid", reason='args count is {0}'.format(args_count))
            # Coerce each supplied argument according to its type letter.
            argv = []
            for i in range(args_count):
                if argtypes[i] in ('m', 'M'):
                    argv.append(args[i])
                elif argtypes[i] in ('i', 'I'):
                    argv.append(int(args[i]))
                elif argtypes[i] in ('s', 'S'):
                    argv.append(str(args[i]))
                elif argtypes[i] in ('h', 'H'):
                    argv.append(args[i])
                else:
                    raise KngetError('argtype {0} is invalid!'.format(argtypes[i]))
            return method(*argv, **kwargs)
        wrapped_method.__doc__ = method.__doc__
        self._commands[method.__name__] = (wrapped_method, help_msg)
        return wrapped_method
    return format_args
Register a method to a command .
13,764
def run(self, tags, begin, end=False):
    """Download images for *tags* from page *begin* to page *end*.

    When *end* is omitted (falsy), only page *begin* is fetched.
    """
    if not end:
        end = begin
    # NOTE(review): end is coerced to int but begin is not -- presumably the
    # base class accepts either form; confirm against Knget.run.
    super(KngetShell, self).run(tags, begin, int(end))
Overrides the run method of the Knget base class, defaulting the end page to the begin page when omitted.
13,765
def write(models, out=None, base=None, propertybase=None, shorteners=None, logger=logging):
    """Serialize Versa models to the Versa literate (Markdown) format.

    models -- input Versa models from which output is generated; must be a
        sequence object, not an iterator (it is iterated more than once).
    out -- output stream (required).
    base / propertybase -- optional base IRIs; property IRIs are
        abbreviated against propertybase plus the Versa base IRI.
    shorteners -- optional IRI shortening rules (currently only used to
        decide whether a @docheader is emitted).
    """
    assert out is not None  # there is no sensible default output stream
    if not isinstance(models, list):
        models = [models]
    shorteners = shorteners or {}
    all_propertybase = [propertybase] if propertybase else []
    all_propertybase.append(VERSA_BASEIRI)
    if any((base, propertybase, shorteners)):
        out.write('# @docheader\n\n* @iri:\n')
        if base:
            out.write(' * @base: {0}'.format(base))
        out.write('\n\n')
    origin_space = set()
    for m in models:
        origin_space.update(all_origins(m))
    # NOTE(review): only the last model bound to `m` is matched below; with
    # multiple input models, statements from earlier models are not written.
    # Preserved as-is pending confirmation of intent.
    for o in origin_space:
        out.write('# {0}\n\n'.format(o))
        for o_, r, t, a in m.match(o):
            abbr_r = abbreviate(r, all_propertybase)
            out.write('* {0}: {1}\n'.format(abbr_r, value_format(t)))
            for k, v in a.items():
                # BUG FIX: the abbreviated key was computed but the raw key
                # was written; use the abbreviation, matching the relation
                # handling above. (Also dropped a discarded duplicate
                # value_format(t) call.)
                abbr_k = abbreviate(k, all_propertybase)
                out.write(' * {0}: {1}\n'.format(abbr_k, value_format(v)))
        out.write('\n')
    return
models - input Versa models from which output is generated . Must be a sequence object not an iterator
13,766
def register_routes(self):
    """Instantiate each referenced controller and register every flattened
    route with the bottle WSGI application.

    Controller instances are created once per controller name (the
    endpoint prefix before the '.') and cached in self.controllers.
    """
    routes = self.flatten_urls(self.urls)
    self.controllers = {}
    controller_names = set()
    # Collect the distinct controller class names referenced by the routes.
    for route in routes:
        cname = route['endpoint'].split('.')[0]
        controller_names.add(cname)
    # Instantiate each controller once, sharing the request/response pair.
    for cname in controller_names:
        attr = getattr(self.mcontrollers, cname)
        instance = attr(request, response)
        self.controllers[cname] = instance
    # Bind every url/method pair to the matching controller action.
    for route in routes:
        cname, aname = route['endpoint'].split('.')
        action = getattr(self.controllers[cname], aname)
        self.wsgi.route(route['url'], route['methods'], action)
Function creates instances of controllers adds into bottle routes
13,767
def register_extensions(self):
    """Import and boot every extension listed in the app configuration.

    Extension names without a dot are resolved inside the
    'glim_extensions' namespace. For each extension, the optional 'start'
    module's before(config) hook is invoked and the optional 'commands'
    module is registered with the command adapter. All failures are
    logged rather than raised.
    """
    try:
        for extension, config in self.config['extensions'].items():
            extension_bstr = ''
            extension_pieces = extension.split('.')
            # Dotted names are used verbatim; bare names are looked up in
            # the glim_extensions package.
            if len(extension_pieces) > 1:
                extension_bstr = '.'.join(extension_pieces)
            else:
                extension_bstr = 'glim_extensions.%s' % extension_pieces[0]
            extension_module = import_module(extension_bstr)
            if extension_module:
                extension_startstr = '%s.%s' % (extension_bstr, 'start')
                extension_start = import_module(extension_startstr, pass_errors=True)
                extension_cmdsstr = '%s.%s' % (extension_bstr, 'commands')
                extension_cmds = import_module(extension_cmdsstr, pass_errors=True)
                if extension_start is not None:
                    # Run the extension's boot hook with its config block.
                    before = extension_start.before
                    before(config)
                if extension_cmds is not None:
                    if self.commandadapter is not None:
                        self.commandadapter.register_extension(extension_cmds, extension_pieces[0])
            else:
                GlimLog.error('Extension %s could not be loaded' % extension)
    except Exception as e:
        GlimLog.error(traceback.format_exc())
Function registers extensions given extensions list
13,768
def register_ssl_context(self):
    """Set self.ssl_context from the app config's 'ssl' entry, or None
    when no ssl configuration is present."""
    if not empty('ssl', self.config['app']):
        self.ssl_context = self.config['app']['ssl']
    else:
        self.ssl_context = None
Function detects ssl context
13,769
def flatten_urls(self, urls):
    """Expand glim's grouped url map into flat bottle route rules.

    Each key is either 'METHOD /url' (one explicit method) or '/url'
    (every supported method). Each value is either 'controller.action'
    (one rule) or a bare controller name, which fans out to one rule per
    method mapped to the lower-cased action of the same name. Raises
    InvalidRouteDefinitionError on any malformed definition.
    """
    available_methods = ['POST', 'PUT', 'OPTIONS', 'GET', 'DELETE', 'TRACE', 'COPY']
    ruleset = []
    for route, endpoint in urls.items():
        pieces = route.split(' ')
        try:
            if len(pieces) > 1:
                # "METHOD /url" form: a single explicit method.
                methods, url = [pieces[0]], pieces[1]
            else:
                # Bare "/url" form: expose every supported method.
                methods, url = available_methods, pieces[0]
            if len(endpoint.split('.')) > 1:
                ruleset.append({'url': url, 'endpoint': endpoint, 'methods': methods})
            else:
                # Controller-only endpoint: one rule per http method.
                for method in available_methods:
                    ruleset.append({
                        'url': url,
                        'endpoint': '%s.%s' % (endpoint, method.lower()),
                        'methods': [method],
                    })
        except Exception:
            raise InvalidRouteDefinitionError()
    return ruleset
Function flatten urls for route grouping feature of glim .
13,770
def _info(self, args, **extra_args):
    """Log freight-forwarder, docker-py, Docker API and Python versions.

    :param args: parsed CLI arguments; must be an argparse.Namespace.
    """
    if not isinstance(args, argparse.Namespace):
        # NOTE(review): this raises the *return value* of logger.error
        # (typically None), which itself fails with a TypeError at runtime;
        # likely intended: log the error, then raise the Exception.
        raise logger.error(Exception("args should of an instance of argparse.Namespace"))
    logger.info("Freight Forwarder: {0}".format(VERSION))
    logger.info("docker-py: {0}".format(docker_py_version))
    logger.info("Docker Api: {0}".format(DOCKER_API_VERSION))
    logger.info("{0} version: {1}".format(platform.python_implementation(), platform.python_version()))
Print freight forwarder info to the user .
13,771
def register(self, typ):
    """Return a class decorator that registers the class under *typ*.

    The decorated class gains a ``plugin_type`` attribute and is stored
    in ``self._class``; registering the same type name twice raises
    ValueError.
    """
    def decorator(plugin_cls):
        # Refuse to silently overwrite an existing registration.
        if typ in self._class:
            raise ValueError("duplicated type name '%s'" % typ)
        plugin_cls.plugin_type = typ
        self._class[typ] = plugin_cls
        return plugin_cls
    return decorator
Register a plugin class under the given type name.
13,772
def get_plugin_class(self, typ):
    """Resolve a plugin class by its registered type name.

    Falls back to importing "<namespace>.<typ>", whose import is expected
    to register the class as a side effect. Raises ValueError when the
    plugin cannot be resolved.
    """
    if typ in self._class:
        return self._class[typ]
    module_name = "%s.%s" % (self.namespace, typ)
    try:
        importlib.import_module(module_name)
    except ImportError as e:
        self.log.debug("ImportError " + str(e))
    else:
        # The import may have registered the plugin; check again.
        if typ in self._class:
            return self._class[typ]
    raise ValueError("unknown plugin '%s'" % typ)
Get a registered plugin class by its type name, importing its module on demand.
13,773
def register_error_handler(app, handler=None):
    """Attach *handler* (default: default_error_handler) to every HTTP
    error code werkzeug knows about on the given app instance."""
    if not handler:
        handler = default_error_handler
    for code in exceptions.default_exceptions.keys():
        app.register_error_handler(code, handler)
Register error handler Registers an exception handler on the app instance for every type of exception code werkzeug is aware about .
13,774
def default_error_handler(exception):
    """Render an error response for *exception*.

    Non-HTTP exceptions are treated as 500 and logged. When the request's
    Accept header is exactly 'application/json', a JSON payload is
    returned; otherwise the matching error template (e.g. 404.html) is
    rendered.
    """
    http_exception = isinstance(exception, exceptions.HTTPException)
    code = exception.code if http_exception else 500
    if code == 500:
        # Unexpected server errors are worth a log entry.
        current_app.logger.error(exception)
    if has_app_context() and has_request_context():
        headers = request.headers
        if 'Accept' in headers and headers['Accept'] == 'application/json':
            return json_error_handler(exception)
    return template_error_handler(exception)
Default error handler Will display an error page with the corresponding error code from template directory for example a not found will load a 404 . html etc . Will first look in userland app templates and if not found fallback to boiler templates to display a default page .
13,775
def _make_nonce ( self ) : chars = string . digits + string . ascii_letters nonce = '' . join ( random . choice ( chars ) for i in range ( 25 ) ) if self . _logging : utils . log ( 'nonce created: %s' % nonce ) return nonce
Generate a unique ID for the request 25 chars in length
13,776
def _make_auth ( self , method , date , nonce , path , query = { } , ctype = 'application/json' ) : query = urlencode ( query ) hmac_str = ( method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' + path + '\n' + query + '\n' ) . lower ( ) . encode ( 'utf-8' ) signature = base64 . b64encode ( hmac . new ( self . _secret_key , hmac_str , digestmod = hashlib . sha256 ) . digest ( ) ) auth = 'On ' + self . _access_key . decode ( 'utf-8' ) + ':HmacSHA256:' + signature . decode ( 'utf-8' ) if self . _logging : utils . log ( { 'query' : query , 'hmac_str' : hmac_str , 'signature' : signature , 'auth' : auth } ) return auth
Create the request signature to authenticate
13,777
def _make_headers ( self , method , path , query = { } , headers = { } ) : date = datetime . datetime . utcnow ( ) . strftime ( '%a, %d %b %Y %H:%M:%S GMT' ) nonce = self . _make_nonce ( ) ctype = headers . get ( 'Content-Type' ) if headers . get ( 'Content-Type' ) else 'application/json' auth = self . _make_auth ( method , date , nonce , path , query = query , ctype = ctype ) req_headers = { 'Content-Type' : 'application/json' , 'Date' : date , 'On-Nonce' : nonce , 'Authorization' : auth , 'User-Agent' : 'Onshape Python Sample App' , 'Accept' : 'application/json' } for h in headers : req_headers [ h ] = headers [ h ] return req_headers
Creates a headers object to sign the request
13,778
def request(self, method, path, query={}, headers={}, body={}, base_url=None):
    """Issue a signed HTTP request to the Onshape API.

    Follows 307 redirects manually, re-signing the request against the
    new host and query string. Returns the requests.Response; non-2xx
    responses are logged rather than raised.
    """
    req_headers = self._make_headers(method, path, query, headers)
    if base_url is None:
        base_url = self._url
    url = base_url + path + '?' + urlencode(query)
    if self._logging:
        utils.log(body)
        utils.log(req_headers)
        utils.log('request url: ' + url)
    # Dict bodies are serialized to JSON; anything else is sent verbatim.
    body = json.dumps(body) if type(body) == dict else body
    res = requests.request(method, url, headers=req_headers, data=body, allow_redirects=False, stream=True)
    if res.status_code == 307:
        # Temporary redirect: the signature covers host/path/query, so the
        # request must be rebuilt and re-signed rather than auto-followed.
        location = urlparse(res.headers["Location"])
        querystring = parse_qs(location.query)
        if self._logging:
            utils.log('request redirected to: ' + location.geturl())
        new_query = {}
        new_base_url = location.scheme + '://' + location.netloc
        for key in querystring:
            # parse_qs wraps each value in a list; unwrap the first element.
            new_query[key] = querystring[key][0]
        return self.request(method, location.path, query=new_query, headers=headers, base_url=new_base_url)
    elif not 200 <= res.status_code <= 206:
        if self._logging:
            utils.log('request failed, details: ' + res.text, level=1)
    else:
        if self._logging:
            utils.log('request succeeded, details: ' + res.text)
    return res
Issues a request to Onshape
13,779
def gen_sentences(self, tokens, aliases=None):
    """Yield transformed Sentence objects from a stream of tokens.

    When the first token of a sentence matches a key in *aliases*, that
    token is replaced by the token expansion of the aliased program text
    before the sentence is transformed.
    """
    if aliases is None:
        aliases = {}
    for sentence in self._gen_sentences(tokens):
        try:
            alias = aliases[str(sentence[0])]
        except KeyError:
            # First token is not an alias; leave the sentence untouched.
            pass
        except IndexError:
            # Empty sentence; nothing to alias.
            pass
        else:
            # Splice the alias expansion in place of the first token.
            sentence[0:1] = list(Program(alias).gen_tokens())
        yield transform(Sentence(sentence), self.transforms)
Generate a sequence of sentences from stream of tokens .
13,780
def set_timezone(new_tz=None):
    """Set the module-level timezone used for datetime fields.

    :param new_tz: tz database name (e.g. 'Europe/Paris'); when omitted,
        the machine's local timezone is (re)installed.
    """
    global tz
    if new_tz:
        tz = pytz.timezone(new_tz)
    else:
        tz = tzlocal.get_localzone()
Set the timezone for datetime fields . By default is your machine s time . If it s called without parameter sets the local time again .
13,781
def fetch_and_parse(method, uri, params_prefix=None, **params):
    """Fetch *uri* and return its JSON response parsed into python
    data types (see _parse)."""
    response = fetch(method, uri, params_prefix, **params)
    return _parse(json.loads(response.text))
Fetch the given uri and return python dictionary with parsed data - types .
13,782
def _parse(data):
    """Recursively convert a decoded JSON payload into python data types.

    Lists/tuples are parsed element-wise. Dicts are assumed to be singly
    nested ({wrapper: {field: value}} -- TODO confirm against the API
    schema) and are flattened one level; ISO-8601 strings become
    timezone-aware datetimes and numeric strings become floats, except
    for a handful of name/username fields that must remain strings.
    """
    if not data:
        return []
    elif isinstance(data, (tuple, list)):
        return [_parse(subdata) for subdata in data]
    # Flatten {wrapper: {field: value}} one level.
    d = {ik: v for k in data.keys() for ik, v in data[k].items()}
    to_parse = dict(d)
    for k, v in to_parse.items():
        if k in {"name", "display_name", "display_name_with_invitation_email_address", "username", "challonge_username"}:
            # Free-form text fields; never coerce these.
            continue
        if isinstance(v, TEXT_TYPE):
            try:
                # Prefer datetime conversion, falling back to float.
                dt = iso8601.parse_date(v)
                d[k] = dt.astimezone(tz)
            except iso8601.ParseError:
                try:
                    d[k] = float(v)
                except ValueError:
                    pass
    return d
Recursively convert a json into python data types
13,783
def get_departments_by_college(college):
    """Return a list of restclients.Department models for the given
    College model, looked up by the college's abbreviation (label)."""
    url = "{}?{}".format(dept_search_url_prefix, urlencode({"college_abbreviation": college.label}))
    return _json_to_departments(get_resource(url), college)
Returns a list of restclients.Department models for the passed College model.
13,784
def scoped_session_decorator(func):
    """Wrap *func* so it runs inside scoped Wallace and psiTurk DB
    sessions, with debug logging."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Imports are deferred so the DB modules are only loaded when the
        # wrapped function actually runs.
        from wallace.db import session as wallace_session
        with sessions_scope(wallace_session) as session:
            from psiturk.db import db_session as psi_session
            with sessions_scope(psi_session) as session_psiturk:
                logger.debug('Running worker %s in scoped DB sessions', func.__name__)
                return func(*args, **kwargs)
    return wrapper
Manage contexts and add debugging to psiTurk sessions .
13,785
def readline(self, continuation=False):
    """Read one (possibly backslash-continued) line from the terminal.

    Uses the secondary prompt when *continuation* is True. Physical lines
    ending in a backslash are joined with the next line. EOF exits the
    interpreter via SystemExit.
    """
    prompt = (self.secondary_prompt_string if continuation else self.primary_prompt_string)
    try:
        line = raw_input(prompt)
        # Keep reading while the line ends with a continuation backslash.
        while line.endswith("\\"):
            line = line[:-1] + raw_input(self.secondary_prompt_string)
    except EOFError:
        raise SystemExit()
    else:
        return line
Read a line from the terminal .
13,786
def readlines(self):
    """Yield commands read from the terminal, indefinitely.

    The first read uses the primary prompt; every subsequent read is
    flagged as a continuation.
    """
    yield self.readline(False)
    while True:
        yield self.readline(True)
Read a command from the terminal .
13,787
def sequence(pdb_filepath):
    """Return the chain sequences of a PDB file as printable text
    ('chain id' newline 'sequence' per chain, sorted by chain id), for
    command-line use."""
    return '\n'.join(['{0}\n{1}'.format(chain_id, str(seq)) for chain_id, seq in sorted(PDB.from_filepath(pdb_filepath).atom_sequences.iteritems())])
A convenience method for printing PDB sequences in command - line execution .
13,788
def fix_pdb(self):
    """Automatically repair fatal-but-fixable PDB errors (non-strict only).

    Currently: coordinate records with a blank chain ID are assigned the
    first chain identifier (A-Z, a-z, 0-9) not already in use. Does
    nothing when self.strict is True.
    """
    if self.strict:
        return
    # Collect every chain ID used by coordinate records.
    # NOTE(review): the 'ATOM ' literals below look whitespace-collapsed;
    # PDB record names are 6 columns ('ATOM  ') -- confirm against the
    # original file.
    chains = set()
    for l in self.lines:
        if l.startswith('ATOM ') or l.startswith('HETATM'):
            chains.add(l[21])
    if ' ' in chains:
        # Pick the first identifier not already taken.
        fresh_id = None
        allowed_chain_ids = list(string.uppercase) + list(string.lowercase) + map(str, range(10))
        for c in chains:
            try:
                allowed_chain_ids.remove(c)
            except:
                pass
        if allowed_chain_ids:
            fresh_id = allowed_chain_ids[0]
        # Rewrite blank chain IDs (column 22) in place.
        new_lines = []
        if fresh_id:
            for l in self.lines:
                if (l.startswith('ATOM ') or l.startswith('HETATM')) and l[21] == ' ':
                    new_lines.append('%s%s%s' % (l[:21], fresh_id, l[22:]))
                else:
                    new_lines.append(l)
            self.lines = new_lines
A function to fix fatal errors in PDB files when they can be automatically fixed . At present this only runs if self . strict is False . We may want a separate property for this since we may want to keep strict mode but still allow PDBs to be fixed .
13,789
def replace_headers(source_pdb_content, target_pdb_content):
    """Combine the header block of one PDB with the body of another.

    Takes every line of *source_pdb_content* up to (but excluding) its
    first structural record, then every line of *target_pdb_content*
    from its first structural record onwards, and returns the joined
    content.
    """
    s = PDB(source_pdb_content)
    t = PDB(target_pdb_content)
    # Header = everything before the first structural record in the source.
    source_headers = []
    for l in s.lines:
        if l[:6].strip() in non_header_records:
            break
        else:
            source_headers.append(l)
    # Body = everything from the first structural record in the target.
    target_body = []
    in_header = True
    for l in t.lines:
        if l[:6].strip() in non_header_records:
            in_header = False
        if not in_header:
            target_body.append(l)
    return '\n'.join(source_headers + target_body)
Takes the headers from source_pdb_content and adds them to target_pdb_content removing any headers that target_pdb_content had . Only the content up to the first structural line are taken from source_pdb_content and only the content from the first structural line in target_pdb_content are taken .
13,790
def from_lines(pdb_file_lines, strict=True, parse_ligands=False):
    """Construct a PDB from a list of file lines (replaces the old
    constructor call that accepted a list of lines directly)."""
    return PDB("\n".join(pdb_file_lines), strict=strict, parse_ligands=parse_ligands)
A function to replace the old constructor call where a list of the file s lines was passed in .
13,791
def _split_lines(self):
    """Partition self.lines by record type into self.parsed_lines.

    parsed_lines maps each known record type to its lines in document
    order; unrecognized lines are collected under the key 0. Also
    refreshes the cached structure lines.
    """
    parsed_lines = {}
    for rt in all_record_types:
        parsed_lines[rt] = []
    parsed_lines[0] = []  # bucket for unrecognized record types
    for line in self.lines:
        linetype = line[0:6]
        if linetype in all_record_types:
            parsed_lines[linetype].append(line)
        else:
            parsed_lines[0].append(line)
    self.parsed_lines = parsed_lines
    self._update_structure_lines()
Creates the parsed_lines dict which keeps all record data in document order indexed by the record type .
13,792
def _update_structure_lines(self):
    """Recompute the cached coordinate-record state from self.lines.

    Refreshes self.structure_lines (ATOM/HETATM/TER records),
    self.atom_chain_order (chains in order of first appearance) and
    self.chain_atoms (atom names seen per chain). Call this whenever
    ATOM/HETATM lines have been altered.
    """
    structure_lines = []
    atom_chain_order = []
    chain_atoms = {}
    for line in self.lines:
        # NOTE(review): these record-type literals look whitespace-collapsed;
        # PDB record names are 6 columns wide ('ATOM  ', 'TER   ') -- confirm
        # against the original file, otherwise these comparisons never match.
        linetype = line[0:6]
        if linetype == 'ATOM ' or linetype == 'HETATM' or linetype == 'TER ':
            chain_id = line[21]
            self.residue_types.add(line[17:20].strip())
            # Patch in the known chain ID for structures missing one.
            if missing_chain_ids.get(self.pdb_id):
                chain_id = missing_chain_ids[self.pdb_id]
            structure_lines.append(line)
            if (chain_id not in atom_chain_order) and (chain_id != ' '):
                atom_chain_order.append(chain_id)
            if linetype == 'ATOM ':
                atom_type = line[12:16].strip()
                if atom_type:
                    chain_atoms[chain_id] = chain_atoms.get(chain_id, set())
                    chain_atoms[chain_id].add(atom_type)
        if linetype == 'ENDMDL':
            # Only the first model of a multi-model (NMR) file is read.
            colortext.warning("ENDMDL detected: Breaking out early. We do not currently handle NMR structures properly.")
            break
    self.structure_lines = structure_lines
    self.atom_chain_order = atom_chain_order
    self.chain_atoms = chain_atoms
ATOM and HETATM lines may be altered by function calls . When this happens this function should be called to keep self . structure_lines up to date .
13,793
def clone(self, parse_ligands=False):
    """Return a copy of this PDB rebuilt from its current lines
    (replaces the old constructor call that accepted a PDB object)."""
    return PDB("\n".join(self.lines), pdb_id=self.pdb_id, strict=self.strict, parse_ligands=parse_ligands)
A function to replace the old constructor call where a PDB object was passed in and cloned .
13,794
def get_pdb_id(self):
    """Return the four-character PDB ID.

    An ID passed to the constructor takes precedence; otherwise it is
    parsed (and cached) from the HEADER record. Returns None when
    neither is available.
    """
    if self.pdb_id:
        return self.pdb_id
    header_records = self.parsed_lines["HEADER"]
    # At most one HEADER record is expected in a well-formed file.
    assert(len(header_records) <= 1)
    if not header_records:
        return None
    # Columns 63-66 of the HEADER record hold the ID code.
    self.pdb_id = header_records[0][62:66]
    return self.pdb_id
Return the PDB ID . If one was passed in to the constructor this takes precedence otherwise the header is parsed to try to find an ID . The header does not always contain a PDB ID in regular PDB files and appears to always have an ID of XXXX in biological units so the constructor override is useful .
13,795
def get_annotated_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found=True):
    """Return ('SEQRES'|'ATOM', sequence) for *chain_id*.

    Prefers the SEQRES sequence when requested and available, falling
    back to the ATOM sequence. When the chain is missing, raises an
    Exception unless raise_Exception_if_not_found is False, in which
    case None is returned.
    """
    if use_seqres_sequences_if_possible and self.seqres_sequences and self.seqres_sequences.get(chain_id):
        return ('SEQRES', self.seqres_sequences[chain_id])
    if self.atom_sequences.get(chain_id):
        return ('ATOM', self.atom_sequences[chain_id])
    if raise_Exception_if_not_found:
        raise Exception('Error: Chain %s expected but not found.' % (str(chain_id)))
    return None
A helper function to return the Sequence for a chain . If use_seqres_sequences_if_possible then we return the SEQRES Sequence if it exists . We return a tuple of values the first identifying which sequence was returned .
13,796
def get_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found=True):
    """Return the sequence for *chain_id* without stating its source.

    Thin wrapper over get_annotated_chain_sequence_string that discards
    the ('SEQRES'|'ATOM') annotation; returns None when the chain is
    missing and exceptions are suppressed.
    """
    annotated = self.get_annotated_chain_sequence_string(chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found=raise_Exception_if_not_found)
    return annotated[1] if annotated else None
Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was .
13,797
def strip_HETATMs(self, only_strip_these_chains=None):
    """Throw away all HETATM lines.

    :param only_strip_these_chains: optional list of chain IDs; when
        given, only HETATM lines belonging to those chains (column 22)
        are removed. Defaults to stripping HETATMs for every chain.

    Refreshes the cached structure lines afterwards.
    """
    # FIX: the default used to be a mutable list literal ([]); None avoids
    # the shared-mutable-default pitfall while preserving behavior (both
    # defaults are falsy, so the else-branch is taken either way).
    if only_strip_these_chains:
        self.lines = [l for l in self.lines if not (l.startswith('HETATM')) or l[21] not in only_strip_these_chains]
    else:
        self.lines = [l for l in self.lines if not (l.startswith('HETATM'))]
    self._update_structure_lines()
Throw away all HETATM lines . If only_strip_these_chains is specified then only strip HETATMs lines for those chains .
13,798
def _get_pdb_format_version ( self ) : if not self . format_version : version = None version_lines = None try : version_lines = [ line for line in self . parsed_lines [ 'REMARK' ] if int ( line [ 7 : 10 ] ) == 4 and line [ 10 : ] . strip ( ) ] except : pass if version_lines : assert ( len ( version_lines ) == 1 ) version_line = version_lines [ 0 ] version_regex = re . compile ( '.*?FORMAT V.(.*),' ) mtch = version_regex . match ( version_line ) if mtch and mtch . groups ( 0 ) : try : version = float ( mtch . groups ( 0 ) [ 0 ] ) except : pass self . format_version = version
Remark 4 indicates the version of the PDB File Format used to generate the file .
13,799
def get_atom_sequence_to_rosetta_json_map(self):
    """Return the PDB ATOM residue ID -> Rosetta residue ID mapping,
    flattened across all chains, as a pretty-printed JSON string."""
    import json
    d = {}
    atom_sequence_to_rosetta_mapping = self.get_atom_sequence_to_rosetta_map()
    # Merge the per-chain sequence maps into one flat dict.
    for c, sm in atom_sequence_to_rosetta_mapping.iteritems():
        for k, v in sm.map.iteritems():
            d[k] = v
    return json.dumps(d, indent=4, sort_keys=True)
Returns the mapping from PDB ATOM residue IDs to Rosetta residue IDs in JSON format .