idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
16,300 | def check_tags ( repos , tags , ignore_existing = False , fail_fast = False ) : debug ( "looking for {n} tag(s):" . format ( n = len ( tags ) ) ) [ debug ( " {t}" . format ( t = t ) ) for t in tags ] debug ( "in {n} repo(s):" . format ( n = len ( repos ) ) ) [ debug ( " {r}" . format ( r = r . full_name ) ) for r in repos ] present_tags = { } absent_tags = { } problems = [ ] for r in repos : has_tags = find_tags_in_repo ( r , tags ) if has_tags : if not ignore_existing : yikes = GitTagExistsError ( "tag(s) {tag} already exists in repos {r}" . format ( tag = list ( has_tags . keys ( ) ) , r = r . full_name ) ) if fail_fast : raise yikes problems . append ( yikes ) error ( yikes ) present_tags [ r . full_name ] = { 'repo' : r , 'tags' : list ( has_tags . values ( ) ) , } missing_tags = [ x for x in tags if x not in has_tags ] if missing_tags : absent_tags [ r . full_name ] = { 'repo' : r , 'need_tags' : missing_tags , } debug ( textwrap . dedent ( ) . format ( n_with = len ( present_tags ) , n_none = len ( absent_tags ) , errors = len ( problems ) , ) ) return present_tags , absent_tags , problems | check if tags already exist in repos |
16,301 | def delete_refs ( repo , refs , dry_run = False ) : assert isinstance ( repo , github . Repository . Repository ) , type ( repo ) debug ( "removing {n} refs from {repo}" . format ( n = len ( refs ) , repo = repo . full_name ) ) for r in refs : debug ( " deleting {ref}" . format ( ref = r . ref ) ) if dry_run : debug ( ' (noop)' ) continue r . delete ( ) | Note that only the ref to a tag can be explicitly removed . The tag object will leave on until it s gargabe collected . |
16,302 | def get_by ( self , name ) : item = self . app . get_by ( name ) return TodoListUX ( ux = self , controlled_list = item ) | get a todo list ux by name |
16,303 | def create_item ( self , name ) : item = self . app . create_item ( name ) return TodoListUX ( ux = self , controlled_list = item ) | create a new named todo list |
16,304 | def get_by ( self , name ) : item = self . controlled_list . get_by ( name ) if item : return TodoElementUX ( parent = self , controlled_element = item ) | find a todo list element by name |
16,305 | def create_item ( self , name ) : elem = self . controlled_list . create_item ( name ) if elem : return TodoElementUX ( parent = self , controlled_element = elem ) | create a new todo list item |
16,306 | def chose_blacklist ( self , ip ) : blacklist = 'ellis_blacklist{0}' try : address = ipaddress . ip_address ( ip ) except ipaddress . AddressValueError : raise else : if address . version is 6 : if address . is_private : msg = "We don't ban private addresses ({0} given)." . format ( address ) raise ipaddress . AddressValueError ( msg ) else : if address . ipv4_mapped is not None : address = address . ipv4_mapped elif address . sixtofour is not None : address = address . sixtofour blacklist = blacklist . format ( address . version ) return ( address , blacklist ) | Given an IP address figure out the set we have to use . |
16,307 | def under_attack ( col , queens ) : left = right = col for _ , column in reversed ( queens ) : left , right = left - 1 , right + 1 if column in ( left , col , right ) : return True return False | Checks if queen is under attack |
16,308 | def _get_bundles_by_type ( self , type ) : bundles = { } bundle_definitions = self . config . get ( type ) if bundle_definitions is None : return bundles for bundle_name , paths in bundle_definitions . items ( ) : bundle_files = [ ] for path in paths : pattern = abspath = os . path . join ( self . basedir , path ) assetdir = os . path . dirname ( abspath ) fnames = [ os . path . join ( assetdir , fname ) for fname in os . listdir ( assetdir ) ] expanded_fnames = fnmatch . filter ( fnames , pattern ) bundle_files . extend ( sorted ( expanded_fnames ) ) bundles [ bundle_name ] = bundle_files return bundles | Get a dictionary of bundles for requested type . |
16,309 | def getmlsthelper ( referencefilepath , start , organism , update ) : from accessoryFunctions . accessoryFunctions import GenObject organismset = set ( ) organism = organism if organism != 'Shigella' else 'Escherichia' organismdictionary = { 'Escherichia' : 'Escherichia coli#1' , 'Shigella' : 'Escherichia coli#1' , 'Vibrio' : 'Vibrio parahaemolyticus' , 'Campylobacter' : 'Campylobacter jejuni' , 'Listeria' : 'Listeria monocytogenes' , 'Bacillus' : 'Bacillus cereus' , 'Klebsiella' : 'Klebsiella pneumoniae' } try : organismset . add ( organismdictionary [ organism ] ) except KeyError : organismset . add ( organism ) for scheme in organismset : organismpath = os . path . join ( referencefilepath , 'MLST' , organism ) try : lastfolder = sorted ( glob ( '{}/*/' . format ( organismpath ) ) ) [ - 1 ] . rstrip ( '/' ) except IndexError : lastfolder = [ ] delta , foldersize , d1 = schemedate ( lastfolder ) newfolder = '{}/{}' . format ( organismpath , d1 ) if update : if delta . days > 7 or foldersize < 100 : printtime ( 'Downloading {} MLST scheme from pubmlst.org' . format ( organism ) , start ) getmlstargs = GenObject ( ) getmlstargs . species = scheme getmlstargs . repository_url = 'http://pubmlst.org/data/dbases.xml' getmlstargs . force_scheme_name = False getmlstargs . path = newfolder make_path ( getmlstargs . path ) getmlst . main ( getmlstargs ) try : profilestart = open ( glob ( '{}/*.txt' . format ( newfolder ) ) [ 0 ] ) . readline ( ) except IndexError : profilestart = [ ] if not profilestart or profilestart [ 0 ] == '<' : shutil . rmtree ( newfolder ) newfolder = lastfolder else : newfolder = lastfolder else : newfolder = lastfolder try : newfoldersize = sum ( os . path . getsize ( '{}/{}' . format ( newfolder , f ) ) for f in os . listdir ( newfolder ) if os . path . isfile ( '{}/{}' . format ( newfolder , f ) ) ) except ( OSError , TypeError ) : newfoldersize = 100 if newfoldersize < 100 : shutil . 
rmtree ( newfolder ) try : newfolder = sorted ( glob ( '{}/*/' . format ( organismpath ) ) ) [ - 1 ] . rstrip ( '/' ) except IndexError : newfolder = organismpath return newfolder | Prepares to run the getmlst . py script provided in SRST2 |
16,310 | def blastnprep ( self ) : for sample in self . metadata : if sample . general . bestassemblyfile != 'NA' : sample [ self . analysistype ] . closealleles = dict ( ) sample [ self . analysistype ] . mismatches = dict ( ) sample [ self . analysistype ] . alignmentlength = dict ( ) sample [ self . analysistype ] . subjectlength = dict ( ) sample [ self . analysistype ] . queryid = dict ( ) sample [ self . analysistype ] . start = dict ( ) sample [ self . analysistype ] . end = dict ( ) sample [ self . analysistype ] . queryseq = dict ( ) if type ( sample [ self . analysistype ] . allelenames ) == list : for allele in sample [ self . analysistype ] . combinedalleles : self . runblast ( sample . general . bestassemblyfile , allele , sample ) | Setup blastn analyses |
16,311 | def strainer ( self ) : analyse = list ( ) for sample in self . runmetadata . samples : if sample . general . bestassemblyfile != 'NA' : try : if os . path . isfile ( '{}{}_{}.csv' . format ( sample [ self . analysistype ] . reportdir , sample . name , self . analysistype ) ) : if self . analysistype == 'rmlst' : updatecall , allelefolder = getrmlsthelper ( self . referencefilepath , self . updatedatabases , self . start ) else : allelefolder = getmlsthelper ( self . referencefilepath , self . start , sample . general . referencegenus , self . updatedatabases ) self . alleles = glob ( '{}/*.tfa' . format ( allelefolder ) ) sample [ self . analysistype ] . alleles = self . alleles sample [ self . analysistype ] . allelenames = [ os . path . split ( x ) [ 1 ] . split ( '.' ) [ 0 ] for x in self . alleles ] analyse . append ( False ) else : self . populator ( sample ) analyse . append ( True ) except ( KeyError , AttributeError ) : self . populator ( sample ) analyse . append ( True ) else : self . populator ( sample ) analyse . append ( False ) MLST ( self ) | Determine whether it is required to run the MLST analyses |
16,312 | def get_seconds ( self ) : parsed = self . parse_hh_mm_ss ( ) total_seconds = parsed . second total_seconds += parsed . minute * 60.0 total_seconds += parsed . hour * 60.0 * 60.0 return total_seconds | Gets seconds from raw time |
16,313 | def get_email_content ( file_path ) : with open ( file_path , "r" ) as in_file : text = str ( in_file . read ( ) ) return text . replace ( "\n\n" , "<br>" ) | Email content in file |
16,314 | def set ( self , ** kwargs ) : self . player_lock . acquire ( ) if 'acqtime' in kwargs : self . player . set_aidur ( kwargs [ 'acqtime' ] ) if 'aifs' in kwargs : self . player . set_aifs ( kwargs [ 'aifs' ] ) self . aifs = kwargs [ 'aifs' ] if 'aifs' in kwargs or 'acqtime' in kwargs : t = kwargs . get ( 'acqtime' , self . player . get_aidur ( ) ) npoints = t * float ( kwargs . get ( 'aifs' , self . player . get_aifs ( ) ) ) self . aitimes = np . linspace ( 0 , t , npoints ) if 'trigger' in kwargs : self . player . set_trigger ( kwargs [ 'trigger' ] ) self . player_lock . release ( ) if 'aochan' in kwargs : self . aochan = kwargs [ 'aochan' ] if 'aichan' in kwargs : self . aichan = kwargs [ 'aichan' ] if 'binsz' in kwargs : self . binsz = kwargs [ 'binsz' ] if 'save' in kwargs : self . save_data = kwargs [ 'save' ] if 'caldb' in kwargs : self . caldb = kwargs [ 'caldb' ] if 'calv' in kwargs : self . calv = kwargs [ 'calv' ] if 'calf' in kwargs : self . calf = kwargs [ 'calf' ] if 'caldb' in kwargs or 'calv' in kwargs : self . update_reference_voltage ( ) if 'datafile' in kwargs : self . datafile = kwargs [ 'datafile' ] if 'reprate' in kwargs : self . reprate = kwargs [ 'reprate' ] if 'save' in kwargs : self . save_data = kwargs [ 'save' ] if 'average' in kwargs : self . average = kwargs [ 'average' ] if 'reject' in kwargs : self . reject = kwargs [ 'reject' ] if 'rejectrate' in kwargs : self . rejectrate = kwargs [ 'rejectrate' ] | Sets an internal setting for acquistion using keywords . |
16,315 | def interval_wait ( self ) : now = time . time ( ) elapsed = ( now - self . last_tick ) * 1000 if elapsed < self . interval : time . sleep ( ( self . interval - elapsed ) / 1000 ) now = time . time ( ) elif elapsed > self . interval : pass self . last_tick = now | Pauses the correct amount of time according to this acquisition object s interval setting and the last time this function was called |
16,316 | def putnotify ( self , name , * args ) : self . queues [ name ] [ 0 ] . put ( * args ) self . queues [ name ] [ 1 ] . set ( ) | Puts data into queue and alerts listeners |
16,317 | def loadassignment ( self ) : printtime ( 'Finding taxonomic assignments' , self . start ) for i in range ( self . cpus ) : threads = Thread ( target = self . assignmentload , args = ( ) ) threads . setDaemon ( True ) threads . start ( ) for sample in self . runmetadata . samples : self . loadqueue . put ( sample ) self . loadqueue . join ( ) self . readlist ( ) | Load the taxonomic assignment for each read |
16,318 | def readlist ( self ) : printtime ( 'Sorting reads' , self . start ) for i in range ( self . cpus ) : threads = Thread ( target = self . listread , args = ( ) ) threads . setDaemon ( True ) threads . start ( ) for sample in self . runmetadata . samples : self . listqueue . put ( sample ) self . listqueue . join ( ) self . fastqfilter ( ) | Sort the reads and create lists to be used in creating sorted . fastq files |
16,319 | def fastqfilter ( self ) : printtime ( 'Creating filtered .fastqfiles' , self . start ) for i in range ( self . cpus ) : threads = Thread ( target = self . filterfastq , args = ( ) ) threads . setDaemon ( True ) threads . start ( ) for sample in self . runmetadata . samples : self . filterqueue . put ( sample ) self . filterqueue . join ( ) metadataprinter . MetadataPrinter ( self ) | Filter the reads into separate files based on taxonomic assignment |
16,320 | def remove_escapes ( self ) : chars = [ ] i = 0 while i < len ( self . string ) : char = self . string [ i ] if char == "\\" : i += 1 else : chars . append ( char ) i += 1 return "" . join ( chars ) | Removes everything except number and letters from string |
16,321 | def convert_accents ( self ) : nkfd_form = unicodedata . normalize ( 'NFKD' , self . string ) return "" . join ( [ char for char in nkfd_form if not unicodedata . combining ( char ) ] ) | Removes accents from text |
16,322 | def remove_all ( self , token ) : out = self . string . replace ( " " , token ) while out . find ( token + token ) >= 0 : out = out . replace ( token + token , token ) return out | Removes all occurrences of token |
16,323 | def init_logging ( ) : with open ( os . path . join ( os . path . dirname ( __file__ ) , 'logging.conf' ) , 'r' ) as yf : config = yaml . load ( yf ) logging . config . dictConfig ( config ) | Initialize a logger from a configuration file to use throughout the project |
16,324 | def _clean_page_unique_slug_required ( self , slug ) : if hasattr ( self , 'instance' ) and self . instance . id : if Content . objects . exclude ( page = self . instance ) . filter ( body = slug , type = "slug" ) . count ( ) : raise forms . ValidationError ( self . err_dict [ 'another_page_error' ] ) elif Content . objects . filter ( body = slug , type = "slug" ) . count ( ) : raise forms . ValidationError ( self . err_dict [ 'another_page_error' ] ) return slug | See if this slug exists already |
16,325 | def extract_stack ( start = 0 ) : try : raise ZeroDivisionError except ZeroDivisionError : trace = sys . exc_info ( ) [ 2 ] f = trace . tb_frame . f_back for i in range ( start ) : f = f . f_back stack = [ ] while f is not None : stack . append ( { "line" : f . f_lineno , "file" : f . f_code . co_filename , "method" : f . f_code . co_name } ) f = f . f_back return stack | SNAGGED FROM traceback . py Altered to return Data |
16,326 | def _extract_traceback ( start ) : tb = sys . exc_info ( ) [ 2 ] for i in range ( start ) : tb = tb . tb_next return _parse_traceback ( tb ) | SNAGGED FROM traceback . py |
16,327 | def wrap ( cls , e , stack_depth = 0 ) : if e == None : return Null elif isinstance ( e , ( list , Except ) ) : return e elif is_data ( e ) : e . cause = unwraplist ( [ Except . wrap ( c ) for c in listwrap ( e . cause ) ] ) return Except ( ** e ) else : tb = getattr ( e , '__traceback__' , None ) if tb is not None : trace = _parse_traceback ( tb ) else : trace = _extract_traceback ( 0 ) cause = Except . wrap ( getattr ( e , '__cause__' , None ) ) if hasattr ( e , "message" ) and e . message : output = Except ( context = ERROR , template = text_type ( e . message ) , trace = trace , cause = cause ) else : output = Except ( context = ERROR , template = text_type ( e ) , trace = trace , cause = cause ) trace = extract_stack ( stack_depth + 2 ) output . trace . extend ( trace ) return output | ENSURE THE STACKTRACE AND CAUSAL CHAIN IS CAPTURED PLUS ADD FEATURES OF Except |
16,328 | def determine_elected_candidates_in_order ( self , candidate_votes ) : eligible_by_vote = defaultdict ( list ) for candidate_id , votes in candidate_votes . candidate_votes_iter ( ) : if candidate_id in self . candidates_elected : continue if votes < self . quota : continue eligible_by_vote [ votes ] . append ( candidate_id ) elected = [ ] for votes in reversed ( sorted ( eligible_by_vote ) ) : candidate_ids = eligible_by_vote [ votes ] candidate_ids . sort ( key = self . candidate_order_fn ) if len ( candidate_ids ) == 1 : elected . append ( candidate_ids [ 0 ] ) else : tie_breaker_round = self . find_tie_breaker ( candidate_ids ) if tie_breaker_round is not None : self . results . provision_used ( ActProvision ( "Multiple candidates elected with %d votes. Tie broken from previous totals." % ( votes ) ) ) for candidate_id in reversed ( sorted ( candidate_ids , key = tie_breaker_round . get_vote_count ) ) : elected . append ( candidate_id ) else : self . results . provision_used ( ActProvision ( "Multiple candidates elected with %d votes. Input required from Australian Electoral Officer." % ( votes ) ) ) permutations = list ( itertools . permutations ( candidate_ids ) ) permutations . sort ( ) choice = self . resolve_election_order ( permutations ) for candidate_id in permutations [ choice ] : elected . append ( candidate_id ) return elected | determine all candidates with at least a quota of votes in candidate_votes . returns results in order of decreasing vote count . Any ties are resolved within this method . |
16,329 | def get_initial_totals ( self ) : "determine the initial total for each candidate. only call this at the start of round 1" candidate_votes = { } for candidate_id in self . candidate_ids : candidate_votes [ candidate_id ] = 0 for candidate_id in self . candidate_ids : candidate_votes [ candidate_id ] = self . candidate_bundle_transactions . get_paper_count ( candidate_id ) for candidate_id in candidate_votes : candidate_votes [ candidate_id ] = int ( candidate_votes [ candidate_id ] ) return candidate_votes , 0 , 0 | determine the initial total for each candidate . only call this at the start of round 1 |
16,330 | def bundle_to_next_candidate ( self , bundle ) : ticket_state = bundle . ticket_state while True : ticket_state = TicketState ( ticket_state . preferences , ticket_state . up_to + 1 ) candidate_id = get_preference ( ticket_state ) if candidate_id in self . candidates_elected or candidate_id in self . candidates_excluded : continue return candidate_id , ticket_state | returns the next candidate_it of the next preference expressed in the ticket for this bundle and the next ticket_state after preferences are moved along if the vote exhausts candidate_id will be None |
16,331 | def elect ( self , candidate_aggregates , candidate_id ) : assert ( candidate_id not in self . candidates_elected ) elected_no = len ( self . candidates_elected ) + 1 self . candidates_elected [ candidate_id ] = True transfer_value = 0 excess_votes = paper_count = None if len ( self . candidates_elected ) != self . vacancies : excess_votes = max ( candidate_aggregates . get_vote_count ( candidate_id ) - self . quota , 0 ) assert ( excess_votes >= 0 ) paper_count = self . candidate_bundle_transactions . get_paper_count ( candidate_id ) if paper_count > 0 : transfer_value = fractions . Fraction ( excess_votes , paper_count ) assert ( transfer_value >= 0 ) self . election_distributions_pending . append ( ( candidate_id , transfer_value , excess_votes ) ) self . results . candidate_elected ( CandidateElected ( candidate_id = candidate_id , order = elected_no , excess_votes = excess_votes , paper_count = paper_count , transfer_value = transfer_value ) ) | Elect a candidate updating internal state to track this . Calculate the paper count to be transferred on to other candidates and if required schedule a distribution fo papers . |
16,332 | def find_tie_breaker ( self , candidate_ids ) : for candidate_aggregates in reversed ( self . round_candidate_aggregates ) : candidates_on_vote = defaultdict ( int ) for candidate_id in candidate_ids : votes = candidate_aggregates . get_vote_count ( candidate_id ) candidates_on_vote [ votes ] += 1 if max ( candidates_on_vote . values ( ) ) == 1 : return candidate_aggregates | finds a round in the count history in which the candidate_ids each had different vote counts if no such round exists returns None |
16,333 | def get_candidate_notional_votes ( self , candidate_aggregates , adjustment ) : "aggregate of vote received by each candidate, and the votes received by any candidate lower in the poll" continuing = self . get_continuing_candidates ( candidate_aggregates ) candidates_notional = { } by_votes = self . get_votes_to_candidates ( continuing , candidate_aggregates ) total = adjustment for votes , candidates in sorted ( by_votes . items ( ) , key = lambda x : x [ 0 ] ) : for candidate_id in candidates : candidates_notional [ candidate_id ] = total + votes total += votes * len ( candidates ) return candidates_notional | aggregate of vote received by each candidate and the votes received by any candidate lower in the poll |
16,334 | def check ( self , action , page = None , lang = None , method = None ) : if self . user . is_superuser : return True if action == 'change' : return self . has_change_permission ( page , lang , method ) if action == 'delete' : if not self . delete_page ( ) : return False return True if action == 'add' : if not self . add_page ( ) : return False return True if action == 'freeze' : perm = self . user . has_perm ( 'pages.can_freeze' ) if perm : return True return False if action == 'publish' : perm = self . user . has_perm ( 'pages.can_publish' ) if perm : return True return False return False | Return True if the current user has permission on the page . |
16,335 | def has_change_permission ( self , page , lang , method = None ) : if method != 'POST' : return True if self . change_page ( ) : return True if lang : perm = self . user . has_perm ( 'pages.can_manage_%s' % lang . replace ( '-' , '_' ) ) if perm : return True perm_func = getattr ( self , 'manage (%s)_page' % lang ) if perm_func ( page ) : return True perm_func = getattr ( self , 'manage hierarchy_page' ) if perm_func ( page ) : return True else : for ancestor in page . get_ancestors ( ) : if perm_func ( ancestor ) : return True return False | Return True if the current user has permission to change the page . |
16,336 | def _join_lines ( txt ) : txt = txt or '' val = '' lines = txt . split ( '\n' ) for line in lines : stripped = line . strip ( ) if len ( stripped ) == 0 : continue val += stripped + ' ' return val . strip ( ) | Remove whitespace from XML input |
16,337 | def _parse_desc ( node ) : desc = '' if len ( node ) == 0 : return '<p>' + node . text + '</p>' for n in node : if n . tag == 'p' : desc += '<p>' + _join_lines ( n . text ) + '</p>' elif n . tag == 'ol' or n . tag == 'ul' : desc += '<ul>' for c in n : if c . tag == 'li' : desc += '<li>' + _join_lines ( c . text ) + '</li>' else : raise ParseError ( 'Expected <li> in <%s>, got <%s>' % ( n . tag , c . tag ) ) desc += '</ul>' else : raise ParseError ( 'Expected <p>, <ul>, <ol> in <%s>, got <%s>' % ( node . tag , n . tag ) ) return desc | A quick n dirty description parser |
16,338 | def validate_description ( xml_data ) : try : root = ET . fromstring ( '<document>' + xml_data + '</document>' ) except StdlibParseError as e : raise ParseError ( str ( e ) ) return _parse_desc ( root ) | Validate the description for validity |
16,339 | def import_description ( text ) : xml = '' is_in_ul = False for line in text . split ( '\n' ) : line = line . strip ( ) if len ( line ) == 0 : continue line_li = _import_description_to_list_element ( line ) if line_li : if not is_in_ul : xml += '<ul>\n' is_in_ul = True xml += '<li>' + _import_description_sentence_case ( line_li ) + '</li>\n' continue if is_in_ul : xml += '</ul>\n' is_in_ul = False xml += '<p>' + _import_description_sentence_case ( line ) + '</p>\n' if is_in_ul : xml += '</ul>\n' return xml | Convert ASCII text to AppStream markup format |
16,340 | def fill_form_field ( self , field_name , field_value ) : self . browser . execute_script ( "document.getElementsByName(\"" + str ( field_name ) + "\")[0].value = \"" + str ( field_value ) + "\"" ) | Fills given field with given value |
16,341 | def fill_login_form ( self , username , username_field , user_password , user_password_field ) : self . fill_form_field ( username_field , username ) self . fill_form_field ( user_password_field , user_password ) | Fills form with login info |
16,342 | def open_scene ( f , kwargs = None ) : defaultkwargs = { 'open' : True } if kwargs is None : kwargs = { } kwargs . update ( defaultkwargs ) fp = f . get_fullpath ( ) mayafile = cmds . file ( fp , ** kwargs ) msg = "Successfully opened file %s with arguments: %s" % ( fp , kwargs ) return ActionStatus ( ActionStatus . SUCCESS , msg , returnvalue = mayafile ) | Opens the given JB_File |
16,343 | def import_all_references ( arg , kwargs = None ) : defaultkwargs = { 'importReference' : True } if kwargs is None : kwargs = { } kwargs . update ( defaultkwargs ) imported = [ ] refs = cmds . file ( query = True , reference = True ) while refs : for rfile in refs : cmds . file ( rfile , ** kwargs ) imported . append ( rfile ) refs = cmds . file ( query = True , reference = True ) msg = "Successfully imported references %s with arguments: %s" % ( imported , kwargs ) return ActionStatus ( ActionStatus . SUCCESS , msg , returnvalue = imported ) | Import all references in the currently open scene |
16,344 | def update_scenenode ( f ) : n = get_current_scene_node ( ) if not n : msg = "Could not find a scene node." return ActionStatus ( ActionStatus . FAILURE , msg ) tfi = f . get_obj ( ) assert tfi tf = dj . taskfiles . get ( task = tfi . task , releasetype = tfi . releasetype , version = tfi . version , descriptor = tfi . descriptor , typ = tfi . typ ) cmds . setAttr ( '%s.taskfile_id' % n , lock = False ) cmds . setAttr ( '%s.taskfile_id' % n , tf . pk ) cmds . setAttr ( '%s.taskfile_id' % n , lock = True ) msg = "Successfully updated scene node to %s" % tf . id return ActionStatus ( ActionStatus . SUCCESS , msg ) | Set the id of the current scene node to the id for the given file |
16,345 | def call ( args , stdout = PIPE , stderr = PIPE ) : p = Popen ( args , stdout = stdout , stderr = stderr ) out , err = p . communicate ( ) try : return out . decode ( sys . stdout . encoding ) , err . decode ( sys . stdout . encoding ) except Exception : return out , err | Calls the given arguments in a seperate process and returns the contents of standard out . |
16,346 | def make ( self ) : eval = self . command . eval ( ) with open ( self . filename , 'w' ) as f : f . write ( eval ) | Evaluate the command and write it to a file . |
16,347 | def set_default_args ( self , default_args ) : for name , args in default_args . items ( ) : command = self [ name ] command . default_args = default_args . get ( command . name ) or { } | Set default args for commands in collection . |
16,348 | def extract_traits ( self , entity ) : traits = getattr ( entity , self . _characteristic ) if traits is not None and isinstance ( traits , Hashable ) : traits = [ traits ] return Trait ( traits , getattr ( entity , self . _characteristic + '_match' , True ) ) | Extract data required to classify entity . |
16,349 | def add ( self , entity ) : characteristic = self . extract_traits ( entity ) if not characteristic . traits : return if characteristic . is_matching : self . add_match ( entity , * characteristic . traits ) else : self . add_mismatch ( entity , * characteristic . traits ) | Add entity to index . |
16,350 | def remove ( self , entity ) : empty_traits = set ( ) self . mismatch_unknown . discard ( entity ) for trait , entities in self . index . items ( ) : entities . discard ( entity ) if not entities : empty_traits . add ( trait ) for empty_trait in empty_traits : del self . index [ empty_trait ] | Remove entity from the MatchBox . |
16,351 | def get_host_certificate ( host , port = 443 ) : ip_addr = socket . gethostbyname ( host ) sock = socket . socket ( ) context = SSL . Context ( SSL . TLSv1_METHOD ) context . set_options ( SSL . OP_NO_SSLv2 ) context . load_verify_locations ( certifi . where ( ) , None ) ssl_sock = SSL . Connection ( context , sock ) ssl_sock . connect ( ( ip_addr , port ) ) ssl_sock . do_handshake ( ) return ssl_sock . get_peer_certificate ( ) | Get a host s certificate . |
16,352 | def get_inner_keys ( dictionary ) : keys = [ ] for key in dictionary . keys ( ) : inner_keys = dictionary [ key ] . keys ( ) keys += [ key + " " + inner_key for inner_key in inner_keys ] return keys | Gets 2nd - level dictionary keys |
16,353 | def get_inner_data ( dictionary ) : out = { } for key in dictionary . keys ( ) : inner_keys = dictionary [ key ] . keys ( ) for inner_key in inner_keys : new_key = key + " " + inner_key out [ new_key ] = dictionary [ key ] [ inner_key ] return out | Gets 2nd - level data into 1st - level dictionary |
16,354 | def do_use ( self , args ) : self . instance = args self . prompt = self . instance + '> ' archive = self . _client . get_archive ( self . instance ) self . streams = [ s . name for s in archive . list_streams ( ) ] self . tables = [ t . name for t in archive . list_tables ( ) ] | Use another instance provided as argument . |
16,355 | def update_label ( self ) : current_file = str ( self . selectedFiles ( ) [ 0 ] ) if not '.' in current_file . split ( os . path . sep ) [ - 1 ] : current_file += '.hdf5' if os . path . isfile ( current_file ) : self . setLabelText ( QtGui . QFileDialog . Accept , 'Reload' ) elif os . path . isdir ( current_file ) : self . setLabelText ( QtGui . QFileDialog . Accept , 'Open' ) else : self . setLabelText ( QtGui . QFileDialog . Accept , 'Create' ) | Updates the text on the accept button to reflect if the name of the data file will result in opening an existing file or creating a new one |
16,356 | def abs_path ( path , format_kwargs = { } , relative_to = None , keep_slash = False ) : if format_kwargs : path = path . format_map ( format_kwargs ) has_slash = path . endswith ( os . sep ) if os . path . isabs ( path ) : path = os . path . normpath ( path ) elif ':' in path : path = asset_path ( path , keep_slash = False ) else : path = os . path . expanduser ( path ) if relative_to : path = os . path . join ( relative_to , path ) path = os . path . abspath ( path ) path = os . path . normpath ( path ) if has_slash and keep_slash : path = '{path}{slash}' . format ( path = path , slash = os . sep ) return path | Get abs . path for path . |
16,357 | def paths_to_str ( paths , format_kwargs = { } , delimiter = os . pathsep , asset_paths = False , check_paths = False ) : if not paths : return '' if isinstance ( paths , str ) : paths = paths . split ( delimiter ) processed_paths = [ ] for path in paths : original = path path = path . format_map ( format_kwargs ) if not os . path . isabs ( path ) : if asset_paths and ':' in path : try : path = asset_path ( path ) except ValueError : path = None if path is not None and os . path . isdir ( path ) : processed_paths . append ( path ) elif check_paths : f = locals ( ) printer . warning ( 'Path does not exist: {path} (from {original})' . format_map ( f ) ) return delimiter . join ( processed_paths ) | Convert paths to a single string . |
16,358 | def index ( ) : for k , v in current_index . items ( ) : current_index [ k ] = 0 logging . info ( "Dashboard refreshed" ) return render_template ( "crystal_dashboard.html" ) | Renders the dashboard when the server is initially run . |
16,359 | def update ( ) : assert request . method == "POST" , "POST request expected received {}" . format ( request . method ) if request . method == 'POST' : selected_run = request . form [ 'selected_run' ] variable_names = utils . get_variables ( selected_run ) . items ( ) if len ( current_index ) < 1 : for _ , v_n in variable_names : current_index [ v_n ] = 0 logging . info ( "Current index: {}" . format ( current_index ) ) data = utils . get_variable_update_dicts ( current_index , variable_names , selected_run ) return jsonify ( data ) | Called by XMLHTTPrequest function periodically to get new graph data . |
16,360 | def get_projects ( ) : assert request . method == "GET" , "GET request expected received {}" . format ( request . method ) try : if request . method == 'GET' : projects = utils . get_projects ( ) return jsonify ( projects ) except Exception as e : logging . error ( e ) return jsonify ( { "0" : "__EMPTY" } ) | Send a dictionary of projects that are available on the database . |
16,361 | def get_runs ( ) : assert request . method == "POST" , "POST request expected received {}" . format ( request . method ) if request . method == "POST" : try : selected_project = request . form [ "selected_project" ] runs = utils . get_runs ( selected_project ) return jsonify ( runs ) except Exception as e : logging . error ( e ) return jsonify ( { "0" : "__EMPTY" } ) | Send a dictionary of runs associated with the selected project . |
def get_variables():
    """Send the variables of the selected run and reset the read indices.

    Rebuilds the module-level ``current_index`` bookkeeping (one zeroed
    cursor per variable) so subsequent /update polls start from the top.
    Returns {"0": "__EMPTY"} on any failure.
    """
    assert request.method == "POST", "POST request expected received {}".format(request.method)
    if request.method == "POST":
        try:
            selected_run = request.form["selected_run"]
            variables = utils.get_variables(selected_run)
            global current_index
            current_index = {}
            # FIX: the original re-checked `len(current_index) < 1` immediately
            # after assigning {} — always true, so the guard was dead code.
            for _idx, var_name in variables.items():
                current_index["{}".format(var_name)] = 0
            return jsonify(variables)
        except Exception as e:
            logging.error(e)
            return jsonify({"0": "__EMPTY"})
def install_completion(
        shell: arg(choices=('bash', 'fish'), help='Shell to install completion for'),
        to: arg(help='~/.bashrc.d/runcommands.rc or ~/.config/fish/runcommands.fish') = None,
        overwrite: 'Overwrite if exists' = False):
    """Install command line completion script.

    Copies the packaged completion script for *shell* to *to* (or a
    shell-specific default), prompting before overwriting unless
    *overwrite* is set.
    """
    # Per-shell (packaged script, default destination) pairs.
    scripts = {
        'bash': ('runcommands:completion/bash/runcommands.rc', '~/.bashrc.d'),
        'fish': ('runcommands:completion/fish/runcommands.fish', '~/.config/fish/runcommands.fish'),
    }
    source, default_destination = scripts[shell]
    source = asset_path(source)
    destination = os.path.expanduser(to or default_destination)
    if os.path.isdir(destination):
        destination = os.path.join(destination, os.path.basename(source))
    printer.info('Installing', shell, 'completion script to:\n ', destination)
    if os.path.exists(destination):
        if overwrite:
            printer.info('Overwriting:\n {destination}'.format_map(locals()))
        else:
            overwrite = confirm('File exists. Overwrite?', abort_on_unconfirmed=True)
    copy_file(source, destination)
    printer.info('Installed; remember to:\n source {destination}'.format_map(locals()))
def synthesize(self, modules, use_string, x64, native):
    """Transform sources.

    Groups *modules*, builds C sources from the groups, and returns the
    stylized file contents ('' when nothing could be built).

    NOTE(review): *use_string*, *x64* and *native* are accepted but never
    used in this body — possibly consumed by an override; confirm.
    """
    # NOTE(review): `hash_func` is not defined in this scope or signature —
    # presumably a module-level name; this print looks like leftover debug
    # output and should probably be removed.
    print(hash_func)
    # Partition the raw list into (module, functions) groups; presumably a
    # name ending in punctuation marks a module boundary — verify.
    groups = group_by(modules, ends_with_punctuation)
    # NOTE(review): make_source elsewhere takes (groups, code_opts, gen_opts);
    # this call passes only (groups, self.database) — verify the signature.
    sources = self.make_source(groups, self.database)
    if sources:
        # sources is [header text, init text]; map them to output file names.
        return stylify_files({'defs.h': sources[0], 'init.c': sources[1]})
    else:
        return ''
def make_source(self, groups, code_opts, gen_opts):
    """Build the final source code for all modules.

    Returns a two-element list: [C header text, C init-source text].
    """
    modules = self.make_modules(groups, code_opts)
    var_decls = modules.var_decls
    # Collect per-module relocation bundles and split them by target arch.
    relocs = AttrsGetter(modules.relocs)
    x86, x64 = relocs.get_attrs('x86', 'x64')
    if code_opts.windll:
        # windll mode wraps all module structs in one container and
        # contributes its own relocation stanzas for each architecture.
        structs, x86_reloc, x64_reloc = make_windll(modules.structs)
        x86 += x86_reloc
        x64 += x64_reloc
    else:
        structs = ''.join(modules.structs)
    c_relocs = reloc_both(relocs.strings + x86, x64)
    data = var_decls.strip()
    c_header = make_c_header(gen_opts.filename, 'NOTICE',
                             modules.typedefs + structs + data)
    # The init source needs to know whether lookups go by hash or by string.
    c_source = make_init(modules.hashes + c_relocs + modules.libprocs,
                         callable(code_opts.hash_func))
    return [c_header, c_source]
def make_modules(self, groups, code_opts):
    """Build the generated C fragments for every (module, functions) group."""
    rendered = []
    for raw_module, raw_funcs in groups:
        mod_name = raw_module[0].strip().strip(string.punctuation)
        funcs = [func.strip() for func in raw_funcs]
        arg_specs = [self.database.query_args(func, raw=True) for func in funcs]
        if self.generic:
            # Generic mode: substitute a VOID* prototype for unknown functions.
            arg_specs = [spec if spec else ('VOID *', []) for spec in arg_specs]
        else:
            # Strict mode: drop functions the database does not know.
            arg_specs = [spec for spec in arg_specs if spec]
        if not arg_specs:
            logging.info(_('%s not found.'), mod_name)
            continue
        logging.debug(mod_name)
        # NOTE(review): in strict mode arg_specs is filtered but funcs is not,
        # so zip() may pair functions with the wrong specs — verify.
        source = ModuleSource(mod_name, zip(funcs, arg_specs), code_opts)
        rendered.append(source.c_source())
    return AttrsGetter(rendered)
def c_source(self):
    """Assemble this module's generated C fragments into a Source tuple."""
    self_relocs = ''.join(self.c_self_relocs())
    module_x86, module_x64 = self.c_module_relocs()
    relocs = Relocs(self_relocs, module_x86, module_x64)
    typedefs = ''.join(self.c_typedefs())
    struct = '' if self.opts.no_structs else self.c_struct()
    hashes = ''.join(self.c_hashes())
    decls = ''.join(self.c_var_decls())
    procs = self.c_loadlib() + ''.join(self.c_getprocs())
    return Source(typedefs, struct, hashes, decls, relocs, procs)
def c_typedefs(self):
    """Build a typedef declaration string for each function in the module."""
    attrs = self.opts.attrs + '\n' if self.opts.attrs else ''
    typedefs = []
    for name, args in self.funcs:
        logging.debug('name: %s args: %s', name, args)
        typedefs.append('typedef\n{}\n{}{}({});\n'.format(
            args[0], attrs, self._c_type_name(name), make_c_args(args[2])))
    return typedefs
def c_struct(self):
    """Render the module's C struct (anonymous in windll mode, typedef otherwise)."""
    members = '\n'.join(self.c_member_funcs(True))
    if self.opts.windll:
        return 'struct {{\n{}{} }} {};\n'.format(self._c_dll_base(), members, self.name)
    tag, alias = self._c_struct_names()
    return 'typedef\nstruct {2} {{\n{0}\n{1}}}\n{3};\n'.format(
        self._c_dll_base(), members, tag, alias)
def c_hashes(self):
    """Hash #defines or name strings for the module's functions (plus the DLL name when not kernel32)."""
    prefix = self.opts.prefix
    if callable(self.opts.hash_func):
        # Hash mode: emit one #define per function using the hash function.
        hashes = ['# define {}{} {}\n'.format(prefix, name, self.opts.hash_func(name))
                  for name, _args in self.funcs]
    else:
        # String mode: emit one C string per function name.
        hashes = [make_c_str(prefix + name, name) for name, _args in self.funcs]
    if self.name != 'kernel32':
        # Non-kernel32 modules also need their own name (for LoadLibraryA).
        hashes.insert(0, make_c_str(prefix + self.name, self.name))
    return hashes
def c_self_relocs(self):
    """Relocation snippets for the name strings emitted by c_hashes()."""
    delta = self.opts.reloc_delta
    prefix = self.opts.prefix
    relocs = []
    if not callable(self.opts.hash_func):
        # String mode: every function name string needs a relocation.
        relocs = [reloc_ptr(prefix + name, delta, 'char *')
                  for name, _args in self.funcs]
    if self.name != 'kernel32':
        # The module-name string exists in both modes (see c_hashes()).
        relocs = [reloc_ptr(prefix + self.name, delta, 'char *')] + relocs
    return relocs
def c_var_decls(self):
    """Variable definitions needed by this module's generated code."""
    if self.opts.no_structs:
        # One HMODULE plus one function-pointer variable per function.
        decls = ['HMODULE {} = NULL;\n'.format(self.name)]
        decls += ['{} *{} = NULL;\n'.format(self._c_type_name(name), name)
                  for name, _args in self.funcs]
        return decls
    if self.opts.windll:
        return ''
    alias = self._c_struct_names()[1]
    return ['{} _{} = {{ 0 }};\n'.format(alias, self.name)]
def c_module_relocs(self):
    """(x86, x64) relocation snippets for the module struct variable."""
    if self.opts.no_structs or self.opts.windll:
        return '', ''
    alias = self._c_struct_names()[1]
    uses_ptr = self._c_uses_pointer()
    x86 = reloc_var(self.name, alias, self.opts.reloc_delta, uses_ptr)
    # x64 is RIP-relative; a plain pointer assignment suffices when needed.
    x64 = '{0} *{1} = &_{1};\n'.format(alias, self.name) if uses_ptr else ''
    return x86, x64
def c_loadlib(self):
    """Emit the code obtaining this module's base, followed by a NULL check."""
    base_var = self._c_base_var()
    k32 = 'windll->kernel32.'
    if self.name == 'kernel32':
        # kernel32 itself is located by PEB walk, not LoadLibraryA.
        target = 'kernel32' if self.opts.no_structs else k32 + self.opts.base
        line = '{} = get_kernel32_base();\n'.format(target)
    else:
        caller = '' if self.opts.no_structs else k32
        line = '{} = {}LoadLibraryA({}{});\n'.format(
            base_var, caller, self.opts.prefix, self.name)
    return line + self._c_null_check(base_var)
def c_getprocs(self):
    """Emit proc-lookup assignments (with NULL checks) for each function."""
    by_hash = callable(self.opts.hash_func)
    lines = []
    for name, _args in self.funcs:
        if name == 'GetProcAddress':
            if by_hash:
                # Hash lookups never need a GetProcAddress pointer.
                continue
            getter = 'get_proc_by_string'
        elif self.opts.no_structs:
            getter = 'GetProcAddress'
        else:
            getter = 'windll->kernel32.GetProcAddress'
        if by_hash:
            getter = 'get_proc_by_hash'
        var = name if self.opts.no_structs else 'windll->{}.{}'.format(self.name, name)
        line = '{} = ({} *){}({}, {}{});\n'.format(
            var, self._c_type_name(name), getter, self._c_base_var(),
            self.opts.prefix, name)
        lines.append(line + self._c_null_check(var))
    return lines
def c_member_funcs(self, for_struct=False):
    """Function-pointer declarations, optionally prefixed by the module decl."""
    decls = ['{} *{};'.format(self._c_type_name(name), name)
             for name, _args in self.funcs]
    if not for_struct:
        decls.insert(0, self._c_mod_decl())
    return decls
16,377 | def _c_base_var ( self ) : if self . opts . no_structs : return self . name return 'windll->{}.{}' . format ( self . name , self . opts . base ) | Return the name of the module base variable . |
def get_precursor_mz(exact_mass, precursor_type):
    """Calculate the precursor m/z from the exact mass and precursor type.

    Prints the unknown key and returns False when *precursor_type* is not
    one of the supported adducts.
    """
    proton = 1.007276
    offsets = {
        '[M-H]-': -proton,
        '[M+H]+': proton,
        '[M+H-H2O]+': proton - ((proton * 2) + 15.9949),
    }
    try:
        return exact_mass + offsets[precursor_type]
    except KeyError as e:
        print(e)
        return False
def line_count(fn):
    """Return the number of lines in file *fn*.

    BUG FIX: the original dereferenced the loop variable after the loop,
    raising NameError on an empty file; this version returns 0 instead.
    """
    count = 0
    with open(fn) as f:
        for count, _line in enumerate(f, 1):
            pass
    return count
def amplitude(self, caldb, calv, atten=0):
    """Calculate the voltage amplitude for this stimulus.

    Converts the dB difference between the internal intensity (plus any
    attenuation) and the calibration intensity *caldb* into a scale factor
    applied to the calibration voltage *calv*.
    """
    db_above_cal = float(self._intensity + atten - caldb)
    return 10 ** (db_above_cal / 20) * calv
def verify(self, **kwargs):
    """Check this component for invalidating conditions.

    Returns 0 when valid, otherwise an error-message string.
    """
    if 'duration' in kwargs and kwargs['duration'] < self._duration:
        return "Window size must equal or exceed stimulus length"
    if self._risefall > self._duration:
        return "Rise and fall times exceed component duration"
    return 0
def stateDict(self):
    """Serialize this component's internal values for later reloading."""
    return {
        'duration': self._duration,
        'intensity': self._intensity,
        'risefall': self._risefall,
        'stim_type': self.name,
    }
def loadState(self, state):
    """Restore previously saved values onto this component."""
    self._duration, self._intensity, self._risefall = (
        state['duration'], state['intensity'], state['risefall'])
def initiate(self, callback=None):
    """Initiate an OAuth handshake with MediaWiki.

    Falls back to this handshaker's configured callback when none is given.
    """
    cb = callback or self.callback
    return initiate(self.mw_uri, self.consumer_token,
                    callback=cb, user_agent=self.user_agent)
def _load_variable(func, program_id, index):
    """Query a uniform/attribute's (size, type, name) via a glGetActive*-style *func*."""
    buf_len = 64
    name_buf = create_string_buffer(buf_len)
    length = pointer(GLsizei(0))
    var_size = pointer(GLint(0))
    var_type = pointer(GLenum(0))  # renamed from `type` to avoid shadowing the builtin
    func(program_id, index, GLsizei(buf_len), length, var_size, var_type, name_buf)
    return var_size[0], var_type[0], name_buf.value.decode('utf8')
def addWidget(self, widget, name):
    """Add a component editor widget.

    Registers *name* in the stimulus-type combo box, pushes *widget* onto
    the editor stack, and re-emits the widget's valueChanged signal
    through this container's own signal.
    """
    self.exploreStimTypeCmbbx.addItem(name)
    self.componentStack.addWidget(widget)
    # Bubble the child editor's change notifications up to our own signal.
    widget.valueChanged.connect(self.valueChanged.emit)
def saveTemplate(self):
    """Serialize the current editor inputs to a dict (JSON-able) for reloading."""
    state = {}
    for editor in self.widgets():
        component = editor.component()
        # Push pending UI values into the component before serializing it.
        editor.saveToObject()
        state[component.name] = component.stateDict()
    state['delay'] = self.delaySpnbx.value()
    return state
def expand_short_options(self, argv):
    """Convert grouped short options like -abc to -a -b -c."""
    return [expanded
            for token in argv
            for expanded in self.parse_multi_short_option(token)]
def find_arg(self, name):
    """Find arg by normalized arg name or parameter name (None if absent)."""
    return self.args.get(self.normalize_name(name))
def find_parameter(self, name):
    """Find parameter by name or normalized arg name (None if absent)."""
    arg = self.args.get(self.normalize_name(name))
    return arg.parameter if arg is not None else None
def args(self):
    """Create args from function parameters.

    Builds an OrderedDict mapping normalized parameter names to Arg
    instances (plus a synthetic 'help' arg), deriving short/long/inverse
    option strings for optional parameters that did not set them
    explicitly, and raising CommandError when any option string would map
    to more than one parameter.
    """
    params = self.parameters
    args = OrderedDict()
    args['help'] = HelpArg(command=self)
    # Bind frequently used helpers to locals.
    normalize_name = self.normalize_name
    get_arg_config = self.get_arg_config
    get_short_option = self.get_short_option_for_arg
    get_long_option = self.get_long_option_for_arg
    get_inverse_option = self.get_inverse_option_for_arg
    names = {normalize_name(name) for name in params}
    used_short_options = set()
    # First pass: reserve short options fixed explicitly via annotations so
    # auto-derivation below cannot collide with them.
    for param in params.values():
        annotation = get_arg_config(param)
        short_option = annotation.short_option
        if short_option:
            used_short_options.add(short_option)
    for name, param in params.items():
        name = normalize_name(name)
        # Private, **kwargs, and keyword-only parameters never become args.
        skip = (name.startswith('_') or
                param.kind is param.VAR_KEYWORD or
                param.kind is param.KEYWORD_ONLY)
        if skip:
            continue
        annotation = get_arg_config(param)
        container = annotation.container
        type = annotation.type
        choices = annotation.choices
        help = annotation.help
        inverse_help = annotation.inverse_help
        short_option = annotation.short_option
        long_option = annotation.long_option
        inverse_option = annotation.inverse_option
        action = annotation.action
        nargs = annotation.nargs
        default = param.default
        if default is not param.empty:
            # Optional parameter: derive any option strings not set explicitly.
            if not short_option:
                short_option = get_short_option(name, names, used_short_options)
                used_short_options.add(short_option)
            if not long_option:
                long_option = get_long_option(name)
            if not inverse_option:
                inverse_option = get_inverse_option(long_option)
        args[name] = Arg(
            command=self,
            parameter=param,
            name=name,
            container=container,
            type=type,
            default=default,
            choices=choices,
            help=help,
            inverse_help=inverse_help,
            short_option=short_option,
            long_option=long_option,
            inverse_option=inverse_option,
            action=action,
            nargs=nargs,
        )
    # Sanity check: each option string may map to exactly one parameter.
    option_map = OrderedDict()
    for arg in args.values():
        for option in arg.options:
            option_map.setdefault(option, [])
            option_map[option].append(arg)
    for option, option_args in option_map.items():
        if len(option_args) > 1:
            names = ', '.join(a.parameter.name for a in option_args)
            message = (
                'Option {option} of command {self.name} maps to multiple parameters: {names}'
            )
            message = message.format_map(locals())
            raise CommandError(message)
    return args
def option_map(self):
    """Map each of this command's option strings to its Arg."""
    mapping = OrderedDict()
    for arg in self.args.values():
        mapping.update((option, arg) for option in arg.options)
    return mapping
def objectprep(self):
    """Create fastq files from an in-progress Illumina MiSeq run, or create
    sample objects from existing files and move them appropriately.
    """
    if self.bcltofastq:
        if self.customsamplesheet:
            assert os.path.isfile(self.customsamplesheet), \
                'Cannot find custom sample sheet as specified {}'.format(self.customsamplesheet)
        # Drive FASTQ creation from the raw run data.
        self.samples = fastqCreator.CreateFastq(self)
        # Mirror selected attributes of the run object onto this object.
        samples_dict = vars(self.samples)
        self.index = samples_dict['index']
        self.index_length = samples_dict['indexlength']
        # NOTE(review): the next four assignments cross-map 'forward(length)'
        # and 'reverse(length)' (self.forward <- 'forwardlength', etc.).
        # This may be intentional renaming, but verify it is not a swap bug.
        self.forward = samples_dict['forwardlength']
        self.reverse = samples_dict['reverselength']
        self.forwardlength = samples_dict['forward']
        self.reverselength = samples_dict['reverse']
        self.header = samples_dict['header']
    else:
        # No conversion requested: build sample objects from existing files.
        self.samples = createObject.ObjectCreation(self)
def fileprep(self):
    """Decompress and concatenate .fastq files.

    Spawns one worker thread per CPU running :meth:`prep`, queues every
    sample, and blocks until the queue is fully processed.
    """
    for _ in range(self.cpus):
        worker = Thread(target=self.prep, args=())
        # Daemon threads let the process exit even if a worker hangs.
        # FIX: Thread.setDaemon() is deprecated (since Python 3.10); use the
        # daemon attribute instead.
        worker.daemon = True
        worker.start()
    for sample in self.metadata:
        # Target path for the per-sample concatenated FASTQ file.
        sample.general.combined = os.path.join(
            sample.general.outputdirectory,
            '{sample_name}_combined.fastq'.format(sample_name=sample.name))
        self.queue.put(sample)
    self.queue.join()
def chunked_join(iterable, int1, int2, str1, str2, func):
    """Chunk and join.

    Splits *iterable* into int1-sized groups, re-chunks each group into
    int2-sized pieces, applies *func* to each joined piece, then joins with
    *str2* within a group and *str1* between groups.
    """
    outer = list(chunked(iterable, int1))
    logging.debug(outer)
    inner = [list(chunked(block, int2)) for block in outer]
    logging.debug(inner)
    joined_groups = [str2.join(func(''.join(piece)) for piece in block)
                     for block in inner]
    return str1.join(joined_groups)
def bytes_to_c_string(data):
    """Render the hexadecimal string as a double-quoted, line-wrapped C string literal."""
    body = chunked_join(data, 20, 2, '"\n "', '', r'\x' + X)
    logging.debug(_('Returning rows: %s'), body)
    return '"{}";'.format(body)
def bytes_to_c_array(data):
    """Render *data* as comma-separated C character literals with a NUL terminator."""
    literals = ("'{}'".format(encode_escape(ch)) for ch in decode_escape(data))
    return ', '.join(literals) + ', 0'
def uni_from(cls, source, *args, **kwargs):
    """Unified from: dispatch to the constructor registered for *source* in cons_dict."""
    logging.debug(_('source: %s, args: %s, kwargs: %s'), source, args, kwargs)
    constructor = getattr(cls, cls.cons_dict[source])
    return constructor(*args, **kwargs)
def uni_to(self, target, *args, **kwargs):
    """Unified to: dispatch to the converter registered for *target* in func_dict."""
    logging.debug(_('target: %s, args: %s, kwargs: %s'), target, args, kwargs)
    converter = getattr(self, self.func_dict[target])
    return converter(*args, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.