idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
13,700
def complete_delivery_note(self, delivery_note_id, complete_dict):
    """Issue the COMPLETE command for a delivery note via a PUT request.

    :param delivery_note_id: id of the delivery note to complete
    :param complete_dict: payload sent with the completion request
    """
    return self._create_put_request(
        resource=DELIVERY_NOTES,
        billomat_id=delivery_note_id,
        command=COMPLETE,
        send_data=complete_dict,
    )
Completes a delivery note
13,701
def delivery_note_pdf(self, delivery_note_id):
    """Request the PDF representation of a delivery note via GET."""
    return self._create_get_request(
        resource=DELIVERY_NOTES,
        billomat_id=delivery_note_id,
        command=PDF,
    )
Opens a pdf of a delivery note
13,702
def get_items_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """Fetch a single result page of items for the given delivery note."""
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_ITEMS,
        per_page=per_page,
        page=page,
        params={'delivery_note_id': delivery_note_id},
    )
Get items of delivery note per page
13,703
def get_all_items_of_delivery_note(self, delivery_note_id):
    """Collect every item of a delivery note across all result pages.

    Iterates page by page until all elements are fetched; if the rate
    limit is exceeded, an exception propagates and nothing is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_items_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_ITEMS,
        delivery_note_id=delivery_note_id,
    )
Get all items of delivery note. This will iterate over all pages until it gets all elements. So if the rate limit is exceeded, it will throw an Exception and you will get nothing
13,704
def update_delivery_note_item(self, delivery_note_item_id, delivery_note_item_dict):
    """Update a delivery note item via a PUT request."""
    return self._create_put_request(
        resource=DELIVERY_NOTE_ITEMS,
        billomat_id=delivery_note_item_id,
        send_data=delivery_note_item_dict,
    )
Updates a delivery note item
13,705
def get_comments_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """Fetch a single result page of comments for the given delivery note."""
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_COMMENTS,
        per_page=per_page,
        page=page,
        params={'delivery_note_id': delivery_note_id},
    )
Get comments of delivery note per page
13,706
def get_all_comments_of_delivery_note(self, delivery_note_id):
    """Collect every comment of a delivery note across all result pages.

    Iterates page by page until all elements are fetched; if the rate
    limit is exceeded, an exception propagates and nothing is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_comments_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_COMMENTS,
        delivery_note_id=delivery_note_id,
    )
Get all comments of delivery note This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,707
def update_delivery_note_comment(self, delivery_note_comment_id, delivery_note_comment_dict):
    """Update a delivery note comment via a PUT request."""
    return self._create_put_request(
        resource=DELIVERY_NOTE_COMMENTS,
        billomat_id=delivery_note_comment_id,
        send_data=delivery_note_comment_dict,
    )
Updates a delivery note comment
13,708
def get_tags_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """Fetch a single result page of tags for the given delivery note."""
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_TAGS,
        per_page=per_page,
        page=page,
        params={'delivery_note_id': delivery_note_id},
    )
Get tags of delivery note per page
13,709
def get_all_tags_of_delivery_note(self, delivery_note_id):
    """Collect every tag of a delivery note across all result pages.

    Iterates page by page until all elements are fetched; if the rate
    limit is exceeded, an exception propagates and nothing is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_tags_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_TAGS,
        delivery_note_id=delivery_note_id,
    )
Get all tags of delivery note This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,710
def get_letters_per_page(self, per_page=1000, page=1, params=None):
    """Fetch a single result page of letters."""
    return self._get_resource_per_page(
        resource=LETTERS, per_page=per_page, page=page, params=params
    )
Get letters per page
13,711
def get_all_letters(self, params=None):
    """Collect all letters across result pages.

    Iterates page by page until all elements are fetched; if the rate
    limit is exceeded, an exception propagates and nothing is returned.
    """
    params = params or {}
    return self._iterate_through_pages(
        self.get_letters_per_page, resource=LETTERS, params=params
    )
Get all letters This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,712
def update_letter(self, letter_id, letter_dict):
    """Update a letter via a PUT request."""
    return self._create_put_request(
        resource=LETTERS, billomat_id=letter_id, send_data=letter_dict
    )
Updates a letter
13,713
def get_comments_of_letter_per_page(self, letter_id, per_page=1000, page=1):
    """Fetch a single result page of comments for the given letter."""
    return self._get_resource_per_page(
        resource=LETTER_COMMENTS,
        per_page=per_page,
        page=page,
        params={'letter_id': letter_id},
    )
Get comments of letter per page
13,714
def get_all_comments_of_letter(self, letter_id):
    """Collect every comment of a letter across all result pages.

    Iterates page by page until all elements are fetched; if the rate
    limit is exceeded, an exception propagates and nothing is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_comments_of_letter_per_page,
        resource=LETTER_COMMENTS,
        letter_id=letter_id,
    )
Get all comments of letter This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,715
def update_letter_comment(self, letter_comment_id, letter_comment_dict):
    """Update a letter comment via a PUT request."""
    return self._create_put_request(
        resource=LETTER_COMMENTS,
        billomat_id=letter_comment_id,
        send_data=letter_comment_dict,
    )
Updates a letter comment
13,716
def get_tags_of_letter_per_page(self, letter_id, per_page=1000, page=1):
    """Fetch a single result page of tags for the given letter."""
    return self._get_resource_per_page(
        resource=LETTER_TAGS,
        per_page=per_page,
        page=page,
        params={'letter_id': letter_id},
    )
Get tags of letter per page
13,717
def get_all_tags_of_letter(self, letter_id):
    """Collect every tag of a letter across all result pages.

    Iterates page by page until all elements are fetched; if the rate
    limit is exceeded, an exception propagates and nothing is returned.
    """
    return self._iterate_through_pages(
        get_function=self.get_tags_of_letter_per_page,
        resource=LETTER_TAGS,
        letter_id=letter_id,
    )
Get all tags of letter This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,718
def get_email_templates_per_page(self, per_page=1000, page=1, params=None):
    """Fetch a single result page of e-mail templates."""
    return self._get_resource_per_page(
        resource=EMAIL_TEMPLATES, per_page=per_page, page=page, params=params
    )
Get e - mail templates per page
13,719
def get_email_templates(self, params=None):
    """Collect all e-mail templates across result pages.

    Iterates page by page until all elements are fetched; if the rate
    limit is exceeded, an exception propagates and nothing is returned.
    """
    params = params or {}
    return self._iterate_through_pages(
        self.get_email_templates_per_page, resource=EMAIL_TEMPLATES, params=params
    )
Get all e - mail templates This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,720
def update_email_template(self, template_id, template_dict):
    """Update an e-mail template via a PUT request."""
    return self._create_put_request(
        resource=EMAIL_TEMPLATES, billomat_id=template_id, send_data=template_dict
    )
Updates an email template
13,721
def get_templates_per_page(self, per_page=1000, page=1, params=None):
    """Fetch a single result page of templates."""
    return self._get_resource_per_page(
        resource=TEMPLATES, per_page=per_page, page=page, params=params
    )
Get templates per page
13,722
def get_all_templates(self, params=None):
    """Collect all templates across result pages.

    Iterates page by page until all elements are fetched; if the rate
    limit is exceeded, an exception propagates and nothing is returned.
    """
    params = params or {}
    return self._iterate_through_pages(
        self.get_templates_per_page, resource=TEMPLATES, params=params
    )
Get all templates This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing
13,723
def update_template(self, template_id, template_dict):
    """Update a template via a PUT request."""
    return self._create_put_request(
        resource=TEMPLATES, billomat_id=template_id, send_data=template_dict
    )
Updates a template
13,724
def reverse_translate ( protein_seq , template_dna = None , leading_seq = None , trailing_seq = None , forbidden_seqs = ( ) , include_stop = True , manufacturer = None ) : if manufacturer == 'gen9' : forbidden_seqs += gen9 . reserved_restriction_sites leading_seq = restriction_sites . get ( leading_seq , leading_seq or...
Generate a well - behaved DNA sequence from the given protein sequence . If a template DNA sequence is specified the returned DNA sequence will be as similar to it as possible . Any given restriction sites will not be present in the sequence . And finally the given leading and trailing sequences will be appropriately c...
13,725
def make_codon_list ( protein_seq , template_dna = None , include_stop = True ) : codon_list = [ ] if template_dna is None : template_dna = [ ] for i , res in enumerate ( protein_seq . upper ( ) ) : try : template_codon = template_dna [ 3 * i : 3 * i + 3 ] except IndexError : template_codon = '---' possible_codons = dn...
Return a list of codons that would be translated to the given protein sequence . Codons are picked first to minimize the mutations relative to a template DNA sequence and second to prefer optimal codons .
13,726
def sanitize_codon_list ( codon_list , forbidden_seqs = ( ) ) : for codon in codon_list : if len ( codon ) != 3 : raise ValueError ( "Codons must have exactly 3 bases: '{}'" . format ( codon ) ) bad_seqs = set ( ) bad_seqs . union ( restriction_sites . get ( seq , seq ) for seq in forbidden_seqs ) bad_seqs . union ( dn...
Make silent mutations to the given codon lists to remove any undesirable sequences that are present within it . Undesirable sequences include restriction sites which may be optionally specified as a second argument and homopolymers above a pre - defined length . The return value is the number of corrections made to the...
13,727
def remove_bad_sequence ( codon_list , bad_seq , bad_seqs ) : gene_seq = '' . join ( codon_list ) problem = bad_seq . search ( gene_seq ) if not problem : return False bs_start_codon = problem . start ( ) // 3 bs_end_codon = problem . end ( ) // 3 for i in range ( bs_start_codon , bs_end_codon ) : problem_codon = codon...
Make a silent mutation to the given codon list to remove the first instance of the given bad sequence found in the gene sequence . If the bad sequence isn t found nothing happens and the function returns false . Otherwise the function returns true . You can use these return values to easily write a loop totally purges ...
13,728
def problem_with_codon ( codon_index , codon_list , bad_seqs ) : base_1 = 3 * codon_index base_3 = 3 * codon_index + 2 gene_seq = '' . join ( codon_list ) for bad_seq in bad_seqs : problem = bad_seq . search ( gene_seq ) if problem and problem . start ( ) < base_3 and problem . end ( ) > base_1 : return True return Fal...
Return true if the given codon overlaps with a bad sequence .
13,729
def sequences_from_fasta(path):
    """Read a FASTA file and map each record description to its sequence."""
    from Bio import SeqIO
    records = SeqIO.parse(path, 'fasta')
    return {record.description: record.seq for record in records}
Extract multiple sequences from a FASTA file .
13,730
def write_sequences_to_fasta ( path , seqs ) : from Bio import SeqIO from Bio . Seq import Seq from Bio . SeqRecord import SeqRecord path = Path ( path ) records = [ ] for id , seq in seqs . items ( ) : record = SeqRecord ( Seq ( seq ) , id = id , description = '' ) records . append ( record ) SeqIO . write ( records ,...
Create a FASTA file listing the given sequences .
13,731
def write_sequences_to_xlsx(path, seqs):
    """Write the id -> sequence mapping to an XLSX file, one row per id."""
    from openpyxl import Workbook
    workbook = Workbook()
    sheet = workbook.active
    # Rows are 1-indexed in openpyxl; column 1 holds the id, column 2 the sequence.
    for row, seq_id in enumerate(seqs, 1):
        sheet.cell(row, 1).value = seq_id
        sheet.cell(row, 2).value = seqs[seq_id]
    workbook.save(path)
Create a XLSX file listing the given sequences .
13,732
def add_node ( self , node ) : nodes = [ n for n in self . nodes ( ) if not isinstance ( n , Source ) ] num_agents = len ( nodes ) curr_generation = int ( ( num_agents - 1 ) / float ( self . generation_size ) ) node . generation = curr_generation if curr_generation == 0 : if self . initial_source : source = min ( self ...
Link the agent to a random member of the previous generation .
13,733
def add_node ( self , node ) : nodes = self . nodes ( ) if len ( nodes ) <= self . m0 : other_nodes = [ n for n in nodes if n . id != node . id ] for n in other_nodes : node . connect ( direction = "both" , whom = n ) else : for idx_newvector in xrange ( self . m ) : these_nodes = [ n for n in nodes if ( n . id != node...
Add newcomers one by one using linear preferential attachment .
13,734
def docker_py_dict ( self ) : return { 'image' : self . image , 'command' : self . cmd , 'hostname' : self . hostname , 'user' : self . user , 'detach' : self . detach , 'stdin_open' : self . open_stdin , 'tty' : self . tty , 'ports' : self . exposed_ports , 'environment' : self . env , 'volumes' : self . volumes , 'ne...
Convert object to match valid docker - py properties .
13,735
def get_person_by_regid ( self , regid ) : if not self . valid_uwregid ( regid ) : raise InvalidRegID ( regid ) url = "{}/{}/full.json" . format ( PERSON_PREFIX , regid . upper ( ) ) response = DAO . getURL ( url , { "Accept" : "application/json" } ) if response . status != 200 : raise DataFailureException ( url , resp...
Returns a restclients.Person object for the given regid. If the regid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown.
13,736
def get_person_by_netid ( self , netid ) : if not self . valid_uwnetid ( netid ) : raise InvalidNetID ( netid ) url = "{}/{}/full.json" . format ( PERSON_PREFIX , netid . lower ( ) ) response = DAO . getURL ( url , { "Accept" : "application/json" } ) if response . status != 200 : raise DataFailureException ( url , resp...
Returns a restclients . Person object for the given netid . If the netid isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,737
def get_person_by_employee_id ( self , employee_id ) : if not self . valid_employee_id ( employee_id ) : raise InvalidEmployeeID ( employee_id ) url = "{}.json?{}" . format ( PERSON_PREFIX , urlencode ( { "employee_id" : employee_id } ) ) response = DAO . getURL ( url , { "Accept" : "application/json" } ) if response ....
Returns a restclients . Person object for the given employee id . If the employee id isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,738
def get_person_by_student_number ( self , student_number ) : if not self . valid_student_number ( student_number ) : raise InvalidStudentNumber ( student_number ) url = "{}.json?{}" . format ( PERSON_PREFIX , urlencode ( { "student_number" : student_number } ) ) response = DAO . getURL ( url , { "Accept" : "application...
Returns a restclients . Person object for the given student number . If the student number isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,739
def get_person_by_prox_rfid ( self , prox_rfid ) : if not self . valid_prox_rfid ( prox_rfid ) : raise InvalidProxRFID ( prox_rfid ) url = "{}.json?{}" . format ( CARD_PREFIX , urlencode ( { "prox_rfid" : prox_rfid } ) ) response = DAO . getURL ( url , { "Accept" : "application/json" } ) if response . status != 200 : r...
Returns a restclients . Person object for the given rfid . If the rfid isn t found or if there is an error communicating with the IdCard WS a DataFailureException will be thrown .
13,740
def get_entity_by_regid ( self , regid ) : if not self . valid_uwregid ( regid ) : raise InvalidRegID ( regid ) url = "{}/{}.json" . format ( ENTITY_PREFIX , regid . upper ( ) ) response = DAO . getURL ( url , { "Accept" : "application/json" } ) if response . status != 200 : raise DataFailureException ( url , response ...
Returns a restclients . Entity object for the given regid . If the regid isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,741
def get_entity_by_netid ( self , netid ) : if not self . valid_uwnetid ( netid ) : raise InvalidNetID ( netid ) url = "{}/{}.json" . format ( ENTITY_PREFIX , netid . lower ( ) ) response = DAO . getURL ( url , { "Accept" : "application/json" } ) if response . status != 200 : raise DataFailureException ( url , response ...
Returns a restclients . Entity object for the given netid . If the netid isn t found or if there is an error communicating with the PWS a DataFailureException will be thrown .
13,742
def generate_identifier ( sender , instance , ** kwargs ) : identifier = Concept . create_identifier ( instance . query ) qs = Concept . objects . filter ( identifier = identifier , lang = instance . lang ) if instance . pk : qs = qs . exclude ( pk = instance . pk ) if qs . count ( ) > 0 : raise ValueError ( "Concept i...
Generate and set identifier of concept before saving object to DB
13,743
def get_concept_item_mapping ( self , concepts = None , lang = None ) : if concepts is None : concepts = self . filter ( active = True ) if lang is not None : concepts = concepts . filter ( lang = lang ) if lang is None : languages = set ( [ concept . lang for concept in concepts ] ) if len ( languages ) > 1 : raise Ex...
Get mapping of concepts to items belonging to concept .
13,744
def get_item_concept_mapping(self, lang):
    """Map item ids to the active concepts (for *lang*) containing them."""
    active_concepts = self.filter(active=True, lang=lang)
    concept_to_items = Concept.objects.get_concept_item_mapping(active_concepts, lang)
    return group_keys_by_value_lists(concept_to_items)
Get mapping of items_ids to concepts containing these items
13,745
def get_concepts_to_recalculate ( self , users , lang , concepts = None ) : only_one_user = False if not isinstance ( users , list ) : only_one_user = True users = [ users ] mapping = self . get_item_concept_mapping ( lang ) current_user_stats = defaultdict ( lambda : { } ) user_stats_qs = UserStat . objects . filter (...
Get concept which have same changes and have to be recalculated
13,746
def recalculate_concepts ( self , concepts , lang = None ) : if len ( concepts ) == 0 : return if lang is None : items = Concept . objects . get_concept_item_mapping ( concepts = Concept . objects . filter ( pk__in = set ( flatten ( concepts . values ( ) ) ) ) ) else : items = Concept . objects . get_concept_item_mappi...
Recalculated given concepts for given users
13,747
def get_user_stats ( self , users , lang = None , concepts = None , since = None , recalculate = True ) : only_one_user = False if not isinstance ( users , list ) : users = [ users ] only_one_user = True if recalculate : if lang is None : raise ValueError ( 'Recalculation without lang is not supported.' ) time_start = ...
Finds all UserStats of given concepts and users . Recompute UserStats if necessary
13,748
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
    """Run *sql* through execute() with locking enabled.

    Lock-happy by design; SQL performance is not currently a daemon-side
    concern.
    """
    return self.execute(sql, parameters, cursorClass, quiet=quiet, locked=True)
We are lock - happy here but SQL performance is not currently an issue daemon - side .
13,749
def execute ( self , sql , parameters = None , cursorClass = DictCursor , quiet = False , locked = False , do_commit = True ) : i = 0 errcode = 0 caughte = None cursor = None if sql . find ( ";" ) != - 1 or sql . find ( "\\G" ) != - 1 : raise Exception ( "The SQL command '%s' contains a semi-colon or \\G. This is a pot...
Execute SQL query . This uses DictCursor by default .
13,750
def insertDict ( self , tblname , d , fields = None ) : if fields == None : fields = sorted ( d . keys ( ) ) values = None try : SQL = 'INSERT INTO %s (%s) VALUES (%s)' % ( tblname , join ( fields , ", " ) , join ( [ '%s' for x in range ( len ( fields ) ) ] , ',' ) ) values = tuple ( [ d [ k ] for k in fields ] ) self ...
Simple function for inserting a dictionary whose keys match the fieldnames of tblname .
13,751
def callproc ( self , procname , parameters = ( ) , cursorClass = DictCursor , quiet = False ) : i = 0 errcode = 0 caughte = None while i < self . numTries : i += 1 try : cursor = self . connection . cursor ( cursorClass ) if type ( parameters ) != type ( ( ) ) : parameters = ( parameters , ) errcode = cursor . callpro...
Calls a MySQL stored procedure procname . This uses DictCursor by default .
13,752
def execQuery ( self , sql , parameters = None , cursorClass = MySQLdb . cursors . Cursor , InnoDB = False ) : i = 0 errcode = 0 caughte = None while i < self . numTries : i += 1 try : cursor = self . connection . cursor ( cursorClass ) if parameters : errcode = cursor . execute ( sql , parameters ) else : errcode = cu...
Execute SQL query .
13,753
def _getFieldsInDB ( self , tablename ) : SQL = 'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME="%s"' % tablename array_data = self . execQuery ( SQL ) return [ x [ 0 ] for x in array_data ]
get all the fields from a specific table
13,754
def _is_ascii_stl ( first_bytes ) : is_ascii = False if 'solid' in first_bytes . decode ( "utf-8" ) . lower ( ) : is_ascii = True return is_ascii
Determine if this is an ASCII based data stream simply by checking the bytes for the word solid .
13,755
def _is_binary_stl ( data ) : is_bin = False start_byte = 0 end_byte = 80 _ = data [ start_byte : end_byte ] start_byte = end_byte end_byte += 4 facet_count = struct . unpack ( 'I' , data [ start_byte : end_byte ] ) [ 0 ] if facet_count > 0 : is_bin = True return is_bin
Determine if this is a binary file through unpacking the first value after the 80th character and testing whether this value is greater than zero . This indicates the number of facets in the file . Could possibly extend this to check that the remaining number of bytes is divisible by 50 .
13,756
def process_bind_param(self, obj, dialect):
    """Serialize a flask_cloudy.Object (or plain value) for storage as a dict."""
    if isinstance(obj, flask_cloudy.Object):
        # Snapshot only the whitelisted attributes of the storage object.
        value = {key: getattr(obj, key) for key in self.DEFAULT_KEYS}
    else:
        value = obj or {}
    return super(self.__class__, self).process_bind_param(value, dialect)
Get a flask_cloudy . Object and save it as a dict
13,757
def create_tasks(task_coro, addrs, *args, flatten=True, **kwargs):
    """Schedule *task_coro* once per address and await them via wait_tasks."""
    scheduled = [
        asyncio.ensure_future(task_coro(agent_addr, *args, **kwargs))
        for agent_addr in addrs
    ]
    return wait_tasks(scheduled, flatten)
Create and schedule a set of asynchronous tasks .
13,758
async def wait_tasks(tasks, flatten=True):
    """Await all given tasks and return their results.

    When ``flatten`` is true and every result is iterable, the results
    are chained together into one flat list.
    """
    results = await asyncio.gather(*tasks)
    if flatten and all(hasattr(result, '__iter__') for result in results):
        results = list(itertools.chain.from_iterable(results))
    return results
Gather a list of asynchronous tasks and wait their completion .
13,759
def split_addrs(addrs):
    """Group addresses into a nested dict keyed by host, then by port."""
    grouped = {}
    for addr in addrs:
        host, port, _ = _addr_key(addr)
        grouped.setdefault(host, {}).setdefault(port, []).append(addr)
    return grouped
Split addresses into dictionaries by hosts and ports .
13,760
def addrs2managers(addrs):
    """Group agent addresses by the manager address assumed for each."""
    by_manager = {}
    for addr in addrs:
        by_manager.setdefault(get_manager(addr), []).append(addr)
    return by_manager
Map agent addresses to their assumed managers .
13,761
def create_jinja_env ( template_path ) : jinja_env = jinja2 . Environment ( loader = jinja2 . FileSystemLoader ( template_path ) , block_start_string = '{%' , block_end_string = '%}' , variable_start_string = '${' , variable_end_string = '}' , comment_start_string = '{#' , comment_end_string = '#}' , line_statement_pre...
Creates a Jinja2 environment with a specific template path .
13,762
def _debug_info ( self ) : self . _msg ( 'DEBUG' ) self . _msg2 ( 'WorkDir: {0}' . format ( self . _curdir ) ) self . _msg2 ( 'Cookies: {0}' . format ( self . _session . cookies ) ) self . _msg2 ( 'Headers: {0}' . format ( self . _session . headers ) ) self . _msg2 ( 'Configs: {0}' . format ( self . _config ) ) self . ...
Show a list of recently variables info .
13,763
def register ( self , argtypes = r'M' , help_msg = None ) : def format_args ( method ) : def wrapped_method ( * args , ** kwargs ) : args_count = len ( args ) argtypes_count = len ( argtypes ) placeholder_count = argtypes . count ( 'H' ) + argtypes . count ( 'h' ) if placeholder_count : min_args_count = ( argtypes_coun...
Register a method to a command .
13,764
def run(self, tags, begin, end=False):
    """Delegate to the parent class run, defaulting *end* to *begin*."""
    end = end or begin
    super(KngetShell, self).run(tags, begin, int(end))
Override method of class Knget
13,765
def write ( models , out = None , base = None , propertybase = None , shorteners = None , logger = logging ) : assert out is not None if not isinstance ( models , list ) : models = [ models ] shorteners = shorteners or { } all_propertybase = [ propertybase ] if propertybase else [ ] all_propertybase . append ( VERSA_BA...
models - input Versa models from which output is generated . Must be a sequence object not an iterator
13,766
def register_routes ( self ) : routes = self . flatten_urls ( self . urls ) self . controllers = { } controller_names = set ( ) for route in routes : cname = route [ 'endpoint' ] . split ( '.' ) [ 0 ] controller_names . add ( cname ) for cname in controller_names : attr = getattr ( self . mcontrollers , cname ) instanc...
Function creates instances of controllers adds into bottle routes
13,767
def register_extensions ( self ) : try : for extension , config in self . config [ 'extensions' ] . items ( ) : extension_bstr = '' extension_pieces = extension . split ( '.' ) if len ( extension_pieces ) > 1 : extension_bstr = '.' . join ( extension_pieces ) else : extension_bstr = 'glim_extensions.%s' % extension_pie...
Function registers extensions given extensions list
13,768
def register_ssl_context(self):
    """Read the ssl setting from the app config, else disable the context."""
    app_config = self.config['app']
    if empty('ssl', app_config):
        self.ssl_context = None
    else:
        self.ssl_context = app_config['ssl']
Function detects ssl context
13,769
def flatten_urls ( self , urls ) : available_methods = [ 'POST' , 'PUT' , 'OPTIONS' , 'GET' , 'DELETE' , 'TRACE' , 'COPY' ] ruleset = [ ] for route , endpoint in urls . items ( ) : route_pieces = route . split ( ' ' ) try : methods = url = None if len ( route_pieces ) > 1 : methods = [ route_pieces [ 0 ] ] url = route_...
Function flatten urls for route grouping feature of glim .
13,770
def _info ( self , args , ** extra_args ) : if not isinstance ( args , argparse . Namespace ) : raise logger . error ( Exception ( "args should of an instance of argparse.Namespace" ) ) logger . info ( "Freight Forwarder: {0}" . format ( VERSION ) ) logger . info ( "docker-py: {0}" . format ( docker_py_version ) ) logg...
Print freight forwarder info to the user .
13,771
def register(self, typ):
    """Return a class decorator that registers the class under *typ*.

    Raises ValueError when the type name is already registered.
    """
    def decorator(cls):
        if typ in self._class:
            raise ValueError("duplicated type name '%s'" % typ)
        cls.plugin_type = typ
        self._class[typ] = cls
        return cls
    return decorator
register a plugin
13,772
def get_plugin_class ( self , typ ) : if typ in self . _class : return self . _class [ typ ] try : importlib . import_module ( "%s.%s" % ( self . namespace , typ ) ) if typ in self . _class : return self . _class [ typ ] except ImportError as e : self . log . debug ( "ImportError " + str ( e ) ) raise ValueError ( "unk...
get class by name
13,773
def register_error_handler(app, handler=None):
    """Attach *handler* (default: default_error_handler) to every HTTP
    error code werkzeug knows about."""
    handler = handler or default_error_handler
    for code in exceptions.default_exceptions:
        app.register_error_handler(code, handler)
Register error handler Registers an exception handler on the app instance for every type of exception code werkzeug is aware about .
13,774
def default_error_handler ( exception ) : http_exception = isinstance ( exception , exceptions . HTTPException ) code = exception . code if http_exception else 500 if code == 500 : current_app . logger . error ( exception ) if has_app_context ( ) and has_request_context ( ) : headers = request . headers if 'Accept' in ...
Default error handler Will display an error page with the corresponding error code from template directory for example a not found will load a 404 . html etc . Will first look in userland app templates and if not found fallback to boiler templates to display a default page .
13,775
def _make_nonce ( self ) : chars = string . digits + string . ascii_letters nonce = '' . join ( random . choice ( chars ) for i in range ( 25 ) ) if self . _logging : utils . log ( 'nonce created: %s' % nonce ) return nonce
Generate a unique ID for the request 25 chars in length
13,776
def _make_auth ( self , method , date , nonce , path , query = { } , ctype = 'application/json' ) : query = urlencode ( query ) hmac_str = ( method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' + path + '\n' + query + '\n' ) . lower ( ) . encode ( 'utf-8' ) signature = base64 . b64encode ( hmac . new ( self . _sec...
Create the request signature to authenticate
13,777
def _make_headers ( self , method , path , query = { } , headers = { } ) : date = datetime . datetime . utcnow ( ) . strftime ( '%a, %d %b %Y %H:%M:%S GMT' ) nonce = self . _make_nonce ( ) ctype = headers . get ( 'Content-Type' ) if headers . get ( 'Content-Type' ) else 'application/json' auth = self . _make_auth ( met...
Creates a headers object to sign the request
13,778
def request ( self , method , path , query = { } , headers = { } , body = { } , base_url = None ) : req_headers = self . _make_headers ( method , path , query , headers ) if base_url is None : base_url = self . _url url = base_url + path + '?' + urlencode ( query ) if self . _logging : utils . log ( body ) utils . log ...
Issues a request to Onshape
13,779
def gen_sentences ( self , tokens , aliases = None ) : if aliases is None : aliases = { } for sentence in self . _gen_sentences ( tokens ) : try : alias = aliases [ str ( sentence [ 0 ] ) ] except KeyError : pass except IndexError : pass else : sentence [ 0 : 1 ] = list ( Program ( alias ) . gen_tokens ( ) ) yield tran...
Generate a sequence of sentences from stream of tokens .
13,780
def set_timezone(new_tz=None):
    """Set the module-level timezone used for datetime fields.

    Called without an argument, it resets to the machine's local zone.
    """
    global tz
    if new_tz:
        tz = pytz.timezone(new_tz)
    else:
        tz = tzlocal.get_localzone()
Set the timezone for datetime fields . By default is your machine s time . If it s called without parameter sets the local time again .
13,781
def fetch_and_parse(method, uri, params_prefix=None, **params):
    """Fetch *uri* and return the JSON response parsed into python data types."""
    response = fetch(method, uri, params_prefix, **params)
    return _parse(json.loads(response.text))
Fetch the given uri and return python dictionary with parsed data - types .
13,782
def _parse ( data ) : if not data : return [ ] elif isinstance ( data , ( tuple , list ) ) : return [ _parse ( subdata ) for subdata in data ] d = { ik : v for k in data . keys ( ) for ik , v in data [ k ] . items ( ) } to_parse = dict ( d ) for k , v in to_parse . items ( ) : if k in { "name" , "display_name" , "displ...
Recursively convert a json into python data types
13,783
def get_departments_by_college(college):
    """Return Department models for the given College model."""
    query = urlencode({"college_abbreviation": college.label})
    url = "{}?{}".format(dept_search_url_prefix, query)
    return _json_to_departments(get_resource(url), college)
Returns a list of restclients . Department models for the passed College model .
13,784
def scoped_session_decorator ( func ) : @ wraps ( func ) def wrapper ( * args , ** kwargs ) : from wallace . db import session as wallace_session with sessions_scope ( wallace_session ) as session : from psiturk . db import db_session as psi_session with sessions_scope ( psi_session ) as session_psiturk : logger . debu...
Manage contexts and add debugging to psiTurk sessions .
13,785
def readline ( self , continuation = False ) : prompt = ( self . secondary_prompt_string if continuation else self . primary_prompt_string ) try : line = raw_input ( prompt ) while line . endswith ( "\\" ) : line = line [ : - 1 ] + raw_input ( self . secondary_prompt_string ) except EOFError : raise SystemExit ( ) else...
Read a line from the terminal .
13,786
def readlines(self):
    """Yield commands from the terminal forever.

    Only the first read is a fresh command; every later read is flagged
    as a continuation.
    """
    first = True
    while True:
        yield self.readline(not first)
        first = False
Read a command from the terminal .
13,787
def sequence(pdb_filepath):
    """Convenience helper: render a PDB file's chain sequences for the CLI."""
    chains = sorted(PDB.from_filepath(pdb_filepath).atom_sequences.iteritems())
    return '\n'.join('{0}\n{1}'.format(chain_id, str(seq)) for chain_id, seq in chains)
A convenience method for printing PDB sequences in command - line execution .
13,788
def fix_pdb ( self ) : if self . strict : return chains = set ( ) for l in self . lines : if l . startswith ( 'ATOM ' ) or l . startswith ( 'HETATM' ) : chains . add ( l [ 21 ] ) if ' ' in chains : fresh_id = None allowed_chain_ids = list ( string . uppercase ) + list ( string . lowercase ) + map ( str , range ( 10 ) ...
A function to fix fatal errors in PDB files when they can be automatically fixed . At present this only runs if self . strict is False . We may want a separate property for this since we may want to keep strict mode but still allow PDBs to be fixed .
13,789
def replace_headers ( source_pdb_content , target_pdb_content ) : s = PDB ( source_pdb_content ) t = PDB ( target_pdb_content ) source_headers = [ ] for l in s . lines : if l [ : 6 ] . strip ( ) in non_header_records : break else : source_headers . append ( l ) target_body = [ ] in_header = True for l in t . lines : if...
Takes the headers from source_pdb_content and adds them to target_pdb_content removing any headers that target_pdb_content had . Only the content up to the first structural line are taken from source_pdb_content and only the content from the first structural line in target_pdb_content are taken .
13,790
def from_lines(pdb_file_lines, strict=True, parse_ligands=False):
    """Build a PDB object from a list of the file's lines.

    Replaces the old constructor call where a list of lines was passed in.
    """
    joined = "\n".join(pdb_file_lines)
    return PDB(joined, strict=strict, parse_ligands=parse_ligands)
A function to replace the old constructor call where a list of the file's lines was passed in.
13,791
def _split_lines ( self ) : parsed_lines = { } for rt in all_record_types : parsed_lines [ rt ] = [ ] parsed_lines [ 0 ] = [ ] for line in self . lines : linetype = line [ 0 : 6 ] if linetype in all_record_types : parsed_lines [ linetype ] . append ( line ) else : parsed_lines [ 0 ] . append ( line ) self . parsed_line...
Creates the parsed_lines dict which keeps all record data in document order indexed by the record type .
13,792
def _update_structure_lines ( self ) : structure_lines = [ ] atom_chain_order = [ ] chain_atoms = { } for line in self . lines : linetype = line [ 0 : 6 ] if linetype == 'ATOM ' or linetype == 'HETATM' or linetype == 'TER ' : chain_id = line [ 21 ] self . residue_types . add ( line [ 17 : 20 ] . strip ( ) ) if missi...
ATOM and HETATM lines may be altered by function calls . When this happens this function should be called to keep self . structure_lines up to date .
13,793
def clone(self, parse_ligands=False):
    """Return a fresh PDB object built from this object's lines.

    Replaces the old constructor call where a PDB object was passed in
    and cloned; pdb_id and strict mode are carried over.
    """
    content = "\n".join(self.lines)
    return PDB(content, pdb_id=self.pdb_id, strict=self.strict, parse_ligands=parse_ligands)
A function to replace the old constructor call where a PDB object was passed in and cloned .
13,794
def get_pdb_id(self):
    """Return the PDB ID.

    An ID passed to the constructor takes precedence; otherwise the
    HEADER record is parsed for one (and the result cached on self).
    Returns None when no constructor ID was given and no HEADER record
    exists.
    """
    if self.pdb_id:
        return self.pdb_id
    header_records = self.parsed_lines["HEADER"]
    assert len(header_records) <= 1
    if not header_records:
        return None
    # Columns 63-66 of the HEADER record hold the four-character idCode.
    self.pdb_id = header_records[0][62:66]
    return self.pdb_id
Return the PDB ID. If one was passed in to the constructor, it takes precedence; otherwise the header is parsed to try to find an ID. The header does not always contain a PDB ID in regular PDB files, and appears to always have an ID of XXXX in biological units, so the constructor override is useful.
13,795
def get_annotated_chain_sequence_string ( self , chain_id , use_seqres_sequences_if_possible , raise_Exception_if_not_found = True ) : if use_seqres_sequences_if_possible and self . seqres_sequences and self . seqres_sequences . get ( chain_id ) : return ( 'SEQRES' , self . seqres_sequences [ chain_id ] ) elif self . a...
A helper function to return the Sequence for a chain . If use_seqres_sequences_if_possible then we return the SEQRES Sequence if it exists . We return a tuple of values the first identifying which sequence was returned .
13,796
def get_chain_sequence_string ( self , chain_id , use_seqres_sequences_if_possible , raise_Exception_if_not_found = True ) : chain_pair = self . get_annotated_chain_sequence_string ( chain_id , use_seqres_sequences_if_possible , raise_Exception_if_not_found = raise_Exception_if_not_found ) if chain_pair : return chain_...
Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was .
13,797
def strip_HETATMs ( self , only_strip_these_chains = [ ] ) : if only_strip_these_chains : self . lines = [ l for l in self . lines if not ( l . startswith ( 'HETATM' ) ) or l [ 21 ] not in only_strip_these_chains ] else : self . lines = [ l for l in self . lines if not ( l . startswith ( 'HETATM' ) ) ] self . _update_s...
Throw away all HETATM lines . If only_strip_these_chains is specified then only strip HETATMs lines for those chains .
13,798
def _get_pdb_format_version ( self ) : if not self . format_version : version = None version_lines = None try : version_lines = [ line for line in self . parsed_lines [ 'REMARK' ] if int ( line [ 7 : 10 ] ) == 4 and line [ 10 : ] . strip ( ) ] except : pass if version_lines : assert ( len ( version_lines ) == 1 ) versi...
Remark 4 indicates the version of the PDB File Format used to generate the file .
13,799
def get_atom_sequence_to_rosetta_json_map ( self ) : import json d = { } atom_sequence_to_rosetta_mapping = self . get_atom_sequence_to_rosetta_map ( ) for c , sm in atom_sequence_to_rosetta_mapping . iteritems ( ) : for k , v in sm . map . iteritems ( ) : d [ k ] = v return json . dumps ( d , indent = 4 , sort_keys = ...
Returns the mapping from PDB ATOM residue IDs to Rosetta residue IDs in JSON format .