| idx (int64, 0-63k) | question (string, 53-5.28k chars) | target (string, 5-805 chars) |
|---|---|---|
1,400
|
def smooth_rectangle ( x , y , rec_w , rec_h , gaussian_width_x , gaussian_width_y ) : gaussian_x_coord = abs ( x ) - rec_w / 2.0 gaussian_y_coord = abs ( y ) - rec_h / 2.0 box_x = np . less ( gaussian_x_coord , 0.0 ) box_y = np . less ( gaussian_y_coord , 0.0 ) sigmasq_x = gaussian_width_x * gaussian_width_x sigmasq_y = gaussian_width_y * gaussian_width_y with float_error_ignore ( ) : falloff_x = x * 0.0 if sigmasq_x == 0.0 else np . exp ( np . divide ( - gaussian_x_coord * gaussian_x_coord , 2 * sigmasq_x ) ) falloff_y = y * 0.0 if sigmasq_y == 0.0 else np . exp ( np . divide ( - gaussian_y_coord * gaussian_y_coord , 2 * sigmasq_y ) ) return np . minimum ( np . maximum ( box_x , falloff_x ) , np . maximum ( box_y , falloff_y ) )
|
Rectangle with a solid central region then Gaussian fall - off at the edges .
|
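For readability, a runnable de-tokenization of the smooth_rectangle row above, with numpy's errstate standing in for the float_error_ignore helper the cell assumes from its surrounding module (an assumption on my part):

```python
import numpy as np

def smooth_rectangle(x, y, rec_w, rec_h, gw_x, gw_y):
    gx = np.abs(x) - rec_w / 2.0
    gy = np.abs(y) - rec_h / 2.0
    box_x, box_y = np.less(gx, 0.0), np.less(gy, 0.0)  # inside the solid core?
    with np.errstate(all='ignore'):  # stand-in for the row's float_error_ignore()
        fall_x = x * 0.0 if gw_x == 0.0 else np.exp(-gx * gx / (2 * gw_x * gw_x))
        fall_y = y * 0.0 if gw_y == 0.0 else np.exp(-gy * gy / (2 * gw_y * gw_y))
    # Solid box wins inside; Gaussian tails take over past the edges.
    return np.minimum(np.maximum(box_x, fall_x), np.maximum(box_y, fall_y))

xs = np.linspace(-1, 1, 5)
print(np.round(smooth_rectangle(xs, 0.0, 1.0, 1.0, 0.25, 0.25), 3))
# approximately [0.135 1. 1. 1. 0.135]
```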
1,401
|
def pack_tups ( * args ) : import numpy as np _DEBUG = False NOT_ITER = - 1 UNINIT_VAL = - 1 if _DEBUG : print ( "args = {0}" . format ( args ) ) class StrNoIter ( str ) : def __iter__ ( self ) : raise NotImplementedError ( "Non-iterable string" ) mod_args = [ ( StrNoIter ( a ) if isinstance ( a , str ) else a ) for a in args ] iterlens = [ ( len ( a ) if iterable ( a ) else NOT_ITER ) for a in mod_args ] maxiter = max ( iterlens ) if not all ( map ( lambda v : v in ( NOT_ITER , maxiter ) , iterlens ) ) : raise ValueError ( "All iterable items must be of equal length" ) if maxiter == NOT_ITER : return [ args ] tups = list ( zip ( * [ ( np . repeat ( a , maxiter ) if l == NOT_ITER else a ) for ( a , l ) in zip ( mod_args , iterlens ) ] ) ) if _DEBUG : print ( "tups = {0}" . format ( tups ) ) return tups
|
Pack an arbitrary set of iterables and non - iterables into tuples .
|
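The pack_tups row broadcasts non-iterables against equal-length iterables. A minimal sketch of that behavior, substituting an explicit test for the module's unshown iterable helper:

```python
import numpy as np

def pack_tups_sketch(*args):
    # Strings count as non-iterables, mirroring the row's StrNoIter wrapper.
    lens = [len(a) if (np.iterable(a) and not isinstance(a, str)) else -1
            for a in args]
    maxiter = max(lens)
    if maxiter == -1:                 # nothing iterable: single tuple
        return [args]
    if not all(l in (-1, maxiter) for l in lens):
        raise ValueError("All iterable items must be of equal length")
    # Repeat scalars to the common length, then zip columns into tuples.
    cols = [([a] * maxiter if l == -1 else list(a)) for a, l in zip(args, lens)]
    return list(zip(*cols))

print(pack_tups_sketch(1, [2, 3], "ab"))  # [(1, 2, 'ab'), (1, 3, 'ab')]
```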
1,402
|
def safe_cast ( invar , totype ) : outvar = totype ( invar ) if not isinstance ( outvar , totype ) : raise TypeError ( "Result of cast to '{0}' is '{1}'" . format ( totype , type ( outvar ) ) ) return outvar
|
Performs a safe typecast .
|
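De-tokenized and exercised, the safe_cast row reads as follows (a direct restatement plus usage):

```python
def safe_cast(invar, totype):
    outvar = totype(invar)
    # Guard against constructors that return something other than their own type.
    if not isinstance(outvar, totype):
        raise TypeError("Result of cast to '{0}' is '{1}'".format(totype, type(outvar)))
    return outvar

print(safe_cast(3.7, int))    # 3
print(safe_cast("5", float))  # 5.0
```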
1,403
|
def make_timestamp ( el_time ) : hrs = el_time // 3600.0 mins = ( el_time % 3600.0 ) // 60.0 secs = el_time % 60.0 stamp = "{0}h {1}m {2}s" . format ( int ( hrs ) , int ( mins ) , int ( secs ) ) return stamp
|
Generate an hour - minutes - seconds timestamp from an interval in seconds .
|
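A worked example of the make_timestamp arithmetic above (floor-divide out hours, then minutes, then take the second remainder):

```python
def make_timestamp(el_time):
    hrs = el_time // 3600.0
    mins = (el_time % 3600.0) // 60.0
    secs = el_time % 60.0
    return "{0}h {1}m {2}s".format(int(hrs), int(mins), int(secs))

# 7384 s = 2*3600 + 3*60 + 4
print(make_timestamp(7384))  # 2h 3m 4s
```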
1,404
|
def check_geom ( c1 , a1 , c2 , a2 , tol = _DEF . XYZ_COORD_MATCH_TOL ) : from . . const import atom_num import numpy as np from . . const import EnumCheckGeomMismatch as ECGM match = True if not len ( c1 . shape ) == 1 : raise ValueError ( ( "'c1' is not a vector." ) ) if not len ( c2 . shape ) == 1 : raise ValueError ( ( "'c2' is not a vector." ) ) if not len ( a1 . shape ) == 1 : raise ValueError ( ( "'a1' is not a simple list." ) ) if not len ( a2 . shape ) == 1 : raise ValueError ( ( "'a2' is not a simple list." ) ) if not c1 . shape [ 0 ] == 3 * a1 . shape [ 0 ] : raise ValueError ( "len(c1) != 3*len(a1)" ) if not c2 . shape [ 0 ] == 3 * a2 . shape [ 0 ] : raise ValueError ( "len(c2) != 3*len(a2)" ) if not c1 . shape [ 0 ] == c2 . shape [ 0 ] : match = False fail_type = ECGM . DIMENSION return match , fail_type , None fail_loc = np . less_equal ( np . abs ( np . subtract ( c1 , c2 ) ) , tol ) if sum ( fail_loc ) != c2 . shape [ 0 ] : match = False fail_type = ECGM . COORDS return match , fail_type , fail_loc if np . issubdtype ( a1 . dtype , np . dtype ( 'str' ) ) : a1 = np . array ( [ atom_num [ e ] for e in a1 ] ) if np . issubdtype ( a2 . dtype , np . dtype ( 'str' ) ) : a2 = np . array ( [ atom_num [ e ] for e in a2 ] ) fail_loc = np . equal ( a1 , a2 ) if sum ( fail_loc ) != a2 . shape [ 0 ] : match = False fail_type = ECGM . ATOMS return match , fail_type , fail_loc return match , None , None
|
Check for consistency of two geometries and atom symbol lists
|
1,405
|
def template_subst ( template , subs , delims = ( '<' , '>' ) ) : subst_text = template for ( k , v ) in subs . items ( ) : subst_text = subst_text . replace ( delims [ 0 ] + k + delims [ 1 ] , v ) return subst_text
|
Perform substitution of content into tagged string .
|
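Usage of the template_subst row above: each key is wrapped in the delimiters and replaced wherever it occurs.

```python
def template_subst(template, subs, delims=('<', '>')):
    subst_text = template
    for k, v in subs.items():
        subst_text = subst_text.replace(delims[0] + k + delims[1], v)
    return subst_text

print(template_subst("run <JOB> on <N> cores", {"JOB": "opt", "N": "8"}))
# run opt on 8 cores
```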
1,406
|
def assert_npfloatarray ( obj , varname , desc , exc , tc , errsrc ) : import numpy as np if varname is None : var = obj else : try : var = getattr ( obj , varname ) except AttributeError : raise exc ( tc , "Attribute '{0}' not defined in '{1}'" . format ( varname , obj ) , errsrc ) try : dt = var . dtype except AttributeError : raise exc ( tc , "'{0}' is not an np.array (lacks a 'dtype' member)" . format ( desc ) , errsrc ) else : if not var . shape : raise exc ( tc , "'{0}' is not an np.array ('len(shape)' < 1)" . format ( desc ) , errsrc ) if not np . issubdtype ( dt , np . float ) : raise exc ( tc , "'{0}' is not an np.array of np.float" . format ( desc ) , errsrc )
|
Assert a value is an |nparray| of NumPy floats .
|
1,407
|
def advance ( self ) : elem = next ( self . _iterable ) for deque in self . _deques : deque . append ( elem )
|
Advance the base iterator and publish the element to the constituent iterators .
|
1,408
|
def _advance_pattern_generators ( self , p ) : valid_generators = [ ] for g in p . generators : for trial in range ( self . max_trials ) : if np . alltrue ( [ self . __distance_valid ( g , v , p ) for v in valid_generators ] ) : valid_generators . append ( g ) break g . force_new_dynamic_value ( 'x' ) g . force_new_dynamic_value ( 'y' ) else : self . warning ( "Unable to place pattern %s subject to given constraints" % g . name ) return valid_generators
|
Advance the parameters for each generator for this presentation .
|
1,409
|
def _advance_params ( self ) : for p in [ 'x' , 'y' , 'direction' ] : self . force_new_dynamic_value ( p ) self . last_time = self . time_fn ( )
|
Explicitly generate new values for these parameters only when appropriate .
|
1,410
|
def register ( self , settings_class = NoSwitcher , * simple_checks , ** conditions ) : if settings_class is NoSwitcher : def decorator ( cls ) : self . register ( cls , * simple_checks , ** conditions ) return cls return decorator available_checks = self . checks . keys ( ) for condition in conditions . keys ( ) : if condition not in available_checks : raise InvalidCondition ( 'There is no check for the condition "%s"' % condition ) self . _registry . append ( ( settings_class , simple_checks , conditions ) )
|
Register a settings class with the switcher . Can be passed the settings class to register or be used as a decorator .
|
1,411
|
def _peek_buffer ( self , i = 0 ) : while len ( self . _buffer ) <= i : self . _buffer . append ( next ( self . _source ) ) return self . _buffer [ i ]
|
Get the next line without consuming it .
|
1,412
|
def _make_readline_peeker ( self ) : counter = itertools . count ( 0 ) def readline ( ) : try : return self . _peek_buffer ( next ( counter ) ) except StopIteration : return '' return readline
|
Make a readline - like function which peeks into the source .
|
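The two rows above (_peek_buffer and _make_readline_peeker) form a lookahead pattern: the peeker hands out successive buffered lines without consuming the source. A self-contained sketch with a hypothetical PeekSource host class standing in for the original object:

```python
import itertools

class PeekSource:
    """Hypothetical stand-in for the buffered-source object in the rows above."""
    def __init__(self, lines):
        self._source = iter(lines)
        self._buffer = []

    def _peek_buffer(self, i=0):
        # Fill the buffer up to index i without consuming beyond it.
        while len(self._buffer) <= i:
            self._buffer.append(next(self._source))
        return self._buffer[i]

    def _make_readline_peeker(self):
        counter = itertools.count(0)
        def readline():
            try:
                return self._peek_buffer(next(counter))
            except StopIteration:
                return ''  # readline convention: empty string at EOF
        return readline

src = PeekSource(["alpha\n", "beta\n"])
readline = src._make_readline_peeker()
print([readline() for _ in range(3)])  # ['alpha\n', 'beta\n', '']
print(repr(src._peek_buffer(0)))       # 'alpha\n' -- nothing was consumed
```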
1,413
|
def _add_node ( self , node , depth ) : self . _topmost_node . add_child ( node , bool ( depth [ 1 ] ) ) self . _stack . append ( ( depth , node ) )
|
Add a node to the graph and the stack .
|
1,414
|
def _load_data ( self , atom_syms , coords , bohrs = True ) : import numpy as np from . const import atom_num , PHYS from . error import XYZError if 'geoms' in dir ( self ) : raise XYZError ( XYZError . OVERWRITE , "Cannot overwrite contents of existing OpanXYZ" , "" ) if not len ( coords . shape ) == 1 : raise ValueError ( "Coordinates are not a vector" ) if not len ( atom_syms . shape ) == 1 : raise ValueError ( "Atom symbols are not a simple list" ) if not coords . shape [ 0 ] == 3 * atom_syms . shape [ 0 ] : raise ValueError ( "len(coords) != 3 * len(atom_syms)" ) if not all ( ( atom_syms [ i ] . upper ( ) in atom_num ) for i in range ( atom_syms . shape [ 0 ] ) ) : raise ValueError ( "Invalid atoms specified: {0}" . format ( [ ( j , atom_syms [ j ] ) for j in ( i for ( i , valid ) in enumerate ( map ( lambda k : k in atom_num , atom_syms ) ) if not valid ) ] ) ) if not all ( map ( np . isreal , coords ) ) : raise ValueError ( "All coordinates must be real numeric" ) self . num_atoms = atom_syms . shape [ 0 ] self . num_geoms = 1 self . in_str = self . LOAD_DATA_FLAG self . descs = np . array ( [ self . LOAD_DATA_FLAG ] ) self . XYZ_path = self . LOAD_DATA_FLAG self . atom_syms = list ( map ( str . upper , list ( atom_syms ) ) ) self . geoms = [ coords / ( 1.0 if bohrs else PHYS . ANG_PER_BOHR ) ]
|
Internal function for making XYZ object from explicit geom data .
|
1,415
|
def geom_iter ( self , g_nums ) : from . utils import pack_tups vals = pack_tups ( g_nums ) for val in vals : yield self . geom_single ( val [ 0 ] )
|
Iterator over a subset of geometries .
|
1,416
|
def dist_single ( self , g_num , at_1 , at_2 ) : import numpy as np from scipy import linalg as spla from . utils import safe_cast as scast if not ( - self . num_atoms <= at_1 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_1' ({0})" . format ( at_1 ) ) if not ( - self . num_atoms <= at_2 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_2' ({0})" . format ( at_2 ) ) at_1 = scast ( np . floor ( at_1 ) , np . int_ ) at_2 = scast ( np . floor ( at_2 ) , np . int_ ) if at_1 == at_2 : dist = 0.0 else : dist = scast ( spla . norm ( self . displ_single ( g_num , at_1 , at_2 ) ) , np . float_ ) return dist
|
Distance between two atoms .
|
1,417
|
def dist_iter ( self , g_nums , ats_1 , ats_2 , invalid_error = False ) : import numpy as np from . utils import pack_tups if _DEBUG : print ( "g_nums = {0}" . format ( g_nums ) ) print ( "ats_1 = {0}" . format ( ats_1 ) ) print ( "ats_2 = {0}" . format ( ats_2 ) ) arglist = self . _none_subst ( g_nums , ats_1 , ats_2 ) tups = pack_tups ( * arglist ) if _DEBUG : print ( tups ) for tup in tups : yield self . _iter_return ( tup , self . dist_single , invalid_error )
|
Iterator over selected interatomic distances .
|
1,418
|
def angle_single ( self , g_num , at_1 , at_2 , at_3 ) : import numpy as np from . utils import safe_cast as scast from . utils . vector import vec_angle if not ( - self . num_atoms <= at_1 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_1' ({0})" . format ( at_1 ) ) if not ( - self . num_atoms <= at_2 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_2' ({0})" . format ( at_2 ) ) if not ( - self . num_atoms <= at_3 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_3' ({0})" . format ( at_3 ) ) at_1 = scast ( np . floor ( at_1 ) , np . int_ ) at_2 = scast ( np . floor ( at_2 ) , np . int_ ) at_3 = scast ( np . floor ( at_3 ) , np . int_ ) if ( at_2 % self . num_atoms ) == ( at_1 % self . num_atoms ) : raise ValueError ( "'at_1' and 'at_2' must be different" ) if ( at_2 % self . num_atoms ) == ( at_3 % self . num_atoms ) : raise ValueError ( "'at_2' and 'at_3' must be different" ) if ( at_1 % self . num_atoms ) == ( at_3 % self . num_atoms ) : return 0.0 vec_2_1 = self . displ_single ( g_num , at_2 , at_1 ) vec_2_3 = self . displ_single ( g_num , at_2 , at_3 ) angle = vec_angle ( vec_2_1 , vec_2_3 ) return angle
|
Spanning angle among three atoms .
|
1,419
|
def angle_iter ( self , g_nums , ats_1 , ats_2 , ats_3 , invalid_error = False ) : from . utils import pack_tups if _DEBUG : print ( "g_nums = {0}" . format ( g_nums ) ) print ( "ats_1 = {0}" . format ( ats_1 ) ) print ( "ats_2 = {0}" . format ( ats_2 ) ) print ( "ats_3 = {0}" . format ( ats_3 ) ) arglist = self . _none_subst ( g_nums , ats_1 , ats_2 , ats_3 ) tups = pack_tups ( * arglist ) if _DEBUG : print ( tups ) for tup in tups : if _DEBUG : print ( tup ) yield self . _iter_return ( tup , self . angle_single , invalid_error )
|
Iterator over selected atomic angles .
|
1,420
|
def dihed_iter ( self , g_nums , ats_1 , ats_2 , ats_3 , ats_4 , invalid_error = False ) : from . utils import pack_tups if _DEBUG : print ( "g_nums = {0}" . format ( g_nums ) ) print ( "ats_1 = {0}" . format ( ats_1 ) ) print ( "ats_2 = {0}" . format ( ats_2 ) ) print ( "ats_3 = {0}" . format ( ats_3 ) ) print ( "ats_4 = {0}" . format ( ats_4 ) ) arglist = self . _none_subst ( g_nums , ats_1 , ats_2 , ats_3 , ats_4 ) tups = pack_tups ( * arglist ) if _DEBUG : print ( tups ) for tup in tups : yield self . _iter_return ( tup , self . dihed_single , invalid_error )
|
Iterator over selected dihedral angles .
|
1,421
|
def displ_single ( self , g_num , at_1 , at_2 ) : import numpy as np from . utils import safe_cast as scast if not ( - self . num_atoms <= at_1 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_1' ({0})" . format ( at_1 ) ) if not ( - self . num_atoms <= at_2 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_2' ({0})" . format ( at_2 ) ) at_1 = scast ( np . floor ( at_1 ) , np . int_ ) at_2 = scast ( np . floor ( at_2 ) , np . int_ ) if ( at_1 % self . num_atoms ) == ( at_2 % self . num_atoms ) : return np . array ( [ 0.0 , 0.0 , 0.0 ] ) g = self . geom_single ( g_num ) displ = np . array ( [ g [ i + 3 * at_2 ] - g [ i + 3 * at_1 ] for i in range ( 3 ) ] ) return displ
|
Displacement vector between two atoms .
|
1,422
|
def displ_iter ( self , g_nums , ats_1 , ats_2 , invalid_error = False ) : from . utils import pack_tups if _DEBUG : print ( "g_nums = {0}" . format ( g_nums ) ) print ( "ats_1 = {0}" . format ( ats_1 ) ) print ( "ats_2 = {0}" . format ( ats_2 ) ) arglist = self . _none_subst ( g_nums , ats_1 , ats_2 ) tups = pack_tups ( * arglist ) if _DEBUG : print ( tups ) for tup in tups : yield self . _iter_return ( tup , self . displ_single , invalid_error )
|
Iterator over indicated displacement vectors .
|
1,423
|
def _none_subst ( self , * args ) : import numpy as np arglist = [ a for a in args ] none_found = False none_vals = list ( map ( lambda e : isinstance ( e , type ( None ) ) , arglist ) ) if np . count_nonzero ( none_vals ) > 1 : raise ValueError ( "Multiple 'None' values [indices {0}] not supported" . format ( tuple ( np . nonzero ( none_vals ) [ 0 ] ) ) ) elif np . count_nonzero ( none_vals ) == 1 : if not all ( np . equal ( list ( map ( np . iterable , arglist ) ) , list ( map ( lambda e : isinstance ( e , str ) , arglist ) ) ) ) : raise ValueError ( "'None' as parameter invalid with non-str iterables" ) none_found = True none_loc = np . nonzero ( none_vals ) [ 0 ] [ 0 ] arglist [ none_loc ] = range ( self . num_geoms if none_loc == 0 else self . num_atoms ) return arglist
|
Helper function to insert full ranges for |None| for X_iter methods .
|
1,424
|
def guess_external_url ( local_host , port ) : if local_host in [ '0.0.0.0' , '::' ] : local_host = socket . getfqdn ( ) match = IPV4_REVERSE_DNS . match ( local_host ) if match : local_host = '.' . join ( reversed ( match . groups ( ) ) ) else : match = IPV6_REVERSE_DNS . match ( local_host ) if match : address_as_int = int ( '' . join ( reversed ( match . groups ( ) ) ) , 16 ) local_host = str ( IPv6Address ( address_as_int ) ) if ':' in local_host : local_host = '[%s]' % local_host . rsplit ( '%' , 1 ) [ 0 ] return 'http://%s:%d/' % ( local_host , port )
|
Return a URL that is most likely to route to local_host from outside .
|
1,425
|
def _check_column_lengths ( self ) : column_lengths_dict = { name : len ( xs ) for ( name , xs ) in self . columns_dict . items ( ) } unique_column_lengths = set ( column_lengths_dict . values ( ) ) if len ( unique_column_lengths ) != 1 : raise ValueError ( "Mismatch between lengths of columns: %s" % ( column_lengths_dict , ) )
|
Make sure columns are of the same length or else DataFrame construction will fail .
|
1,426
|
def new_from_files ( self , basepath , basename , repo , bohrs = False , software = _E_SW . ORCA , repo_clobber = False , ** kwargs ) : import os from os import path as osp from . . xyz import OpanXYZ as OX from . . grad import OrcaEngrad as OE from . . hess import OrcaHess as OH from . repo import OpanAnharmRepo as OR from . . const import EnumDispDirection as E_DDir , EnumFileType as E_FT from . . const import EnumSoftware as E_SW from . . const import DEF from . . error import AnharmError as ANHErr if not self . w_xyz == None : raise ANHErr ( ANHErr . STATUS , "XYZ object is already bound" , "" ) if not self . w_grad == None : raise ANHErr ( ANHErr . STATUS , "GRAD object is already bound" , "" ) if not self . w_hess == None : raise ANHErr ( ANHErr . STATUS , "HESS object is already bound" , "" ) if not self . repo == None : raise ANHErr ( ANHErr . STATUS , "Repository object is already bound" , "" ) self . w_xyz = OX ( osp . join ( basepath , basename + osp . extsep + xyz_ext ) ) self . w_grad = OE ( osp . join ( basepath , basename + osp . extsep + engrad_ext ) , 0 , E_DDir . NO_DISP , 0.0 ) self . w_hess = OH ( osp . join ( basepath , basename + osp . extsep + hess_ext ) , 0 , E_DDir . NO_DISP , 0.0 ) if not isinstance ( repo , str ) : raise TypeError ( "Must create new repository when loading " + "a new dataset." ) if len ( osp . split ( repo ) [ 0 ] ) > 0 and not osp . isabs ( repo ) : repo = osp . join ( basepath , repo ) if osp . isdir ( repo ) : raise IOError ( "Cannot bind repository -- specified " + "location is a directory" ) if osp . isfile ( repo ) : if repo_clobber : os . remove ( repo ) else : raise IOError ( "Target repository file exists and " + "clobber is disabled." ) self . repo = OR ( repo )
|
Initialize with data from files .
|
1,427
|
def remote_exception ( exc , tb ) : if type ( exc ) in exceptions : typ = exceptions [ type ( exc ) ] return typ ( exc , tb ) else : try : typ = type ( exc . __class__ . __name__ , ( RemoteException , type ( exc ) ) , { 'exception_type' : type ( exc ) } ) exceptions [ type ( exc ) ] = typ return typ ( exc , tb ) except TypeError : return exc
|
Metaclass that wraps exception type in RemoteException
|
1,428
|
def reads_overlapping_variants ( variants , samfile , ** kwargs ) : chromosome_names = set ( samfile . references ) for variant in variants : if variant . contig in chromosome_names : chromosome = variant . contig elif "chr" + variant . contig in chromosome_names : chromosome = "chr" + variant . contig else : logger . warn ( "Chromosome '%s' from variant %s not in alignment file %s" , variant . contig , variant , samfile . filename ) yield variant , [ ] continue allele_reads = reads_overlapping_variant ( samfile = samfile , chromosome = chromosome , variant = variant , ** kwargs ) yield variant , allele_reads
|
Generates sequence of tuples each containing a variant paired with a list of AlleleRead objects .
|
1,429
|
def group_reads_by_allele ( allele_reads ) : allele_to_reads_dict = defaultdict ( list ) for allele_read in allele_reads : allele_to_reads_dict [ allele_read . allele ] . append ( allele_read ) return allele_to_reads_dict
|
Returns a dictionary mapping each allele's nucleotide sequence to a list of supporting AlleleRead objects .
|
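The grouping above is a plain defaultdict bucket-by-key. A runnable sketch, with a hypothetical namedtuple standing in for AlleleRead:

```python
from collections import defaultdict, namedtuple

AlleleRead = namedtuple("AlleleRead", ["allele", "name"])  # hypothetical stand-in

def group_reads_by_allele(allele_reads):
    allele_to_reads_dict = defaultdict(list)
    for allele_read in allele_reads:
        allele_to_reads_dict[allele_read.allele].append(allele_read)
    return allele_to_reads_dict

reads = [AlleleRead("A", "r1"), AlleleRead("T", "r2"), AlleleRead("A", "r3")]
grouped = group_reads_by_allele(reads)
print({k: [r.name for r in v] for k, v in grouped.items()})
# {'A': ['r1', 'r3'], 'T': ['r2']}
```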
1,430
|
def from_locus_read ( cls , locus_read , n_ref ) : sequence = locus_read . sequence reference_positions = locus_read . reference_positions read_pos_before = locus_read . base0_read_position_before_variant read_pos_after = locus_read . base0_read_position_after_variant ref_pos_before = reference_positions [ read_pos_before ] if ref_pos_before is None : logger . warn ( "Missing reference pos for nucleotide before variant on read: %s" , locus_read ) return None ref_pos_after = reference_positions [ read_pos_after ] if ref_pos_after is None : logger . warn ( "Missing reference pos for nucleotide after variant on read: %s" , locus_read ) return None if n_ref == 0 : if ref_pos_after - ref_pos_before != 1 : logger . debug ( "Positions before (%d) and after (%d) variant should be adjacent on read %s" , ref_pos_before , ref_pos_after , locus_read ) return None ref_positions_for_inserted = reference_positions [ read_pos_before + 1 : read_pos_after ] if any ( insert_pos is not None for insert_pos in ref_positions_for_inserted ) : logger . debug ( "Skipping read, inserted nucleotides shouldn't map to reference" ) return None else : if ref_pos_after - ref_pos_before != n_ref + 1 : logger . debug ( ( "Positions before (%d) and after (%d) variant should be " "adjacent on read %s" ) , ref_pos_before , ref_pos_after , locus_read ) return None nucleotides_at_variant_locus = sequence [ read_pos_before + 1 : read_pos_after ] prefix = sequence [ : read_pos_before + 1 ] suffix = sequence [ read_pos_after : ] prefix , suffix = convert_from_bytes_if_necessary ( prefix , suffix ) prefix , suffix = trim_N_nucleotides ( prefix , suffix ) return cls ( prefix , nucleotides_at_variant_locus , suffix , name = locus_read . name )
|
Given a single LocusRead object return either an AlleleRead or None
|
1,431
|
def most_common_nucleotides ( partitioned_read_sequences ) : counts , variant_column_indices = nucleotide_counts ( partitioned_read_sequences ) max_count_per_column = counts . max ( axis = 0 ) assert len ( max_count_per_column ) == counts . shape [ 1 ] max_nucleotide_index_per_column = np . argmax ( counts , axis = 0 ) assert len ( max_nucleotide_index_per_column ) == counts . shape [ 1 ] nucleotides = [ index_to_dna_nucleotide [ idx ] for idx in max_nucleotide_index_per_column ] other_nucleotide_counts = counts . sum ( axis = 0 ) - max_count_per_column return "" . join ( nucleotides ) , max_count_per_column , other_nucleotide_counts
|
Find the most common nucleotide at each offset to the left and right of a variant .
|
1,432
|
def point_displ ( pt1 , pt2 ) : import numpy as np if not np . iterable ( pt1 ) : pt1 = np . float64 ( np . array ( [ pt1 ] ) ) else : pt1 = np . float64 ( np . array ( pt1 ) . squeeze ( ) ) if not np . iterable ( pt2 ) : pt2 = np . float64 ( np . array ( [ pt2 ] ) ) else : pt2 = np . float64 ( np . array ( pt2 ) . squeeze ( ) ) displ = np . matrix ( np . subtract ( pt2 , pt1 ) ) . reshape ( 3 , 1 ) return displ
|
Calculate the displacement vector between two n - D points .
|
1,433
|
def point_dist ( pt1 , pt2 ) : from scipy import linalg as spla dist = spla . norm ( point_displ ( pt1 , pt2 ) ) return dist
|
Calculate the Euclidean distance between two n - D points .
|
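The two rows above compose: point_dist is just the norm of point_displ. A sketch using plain ndarrays instead of the deprecated np.matrix the original cell relies on:

```python
import numpy as np
from scipy import linalg as spla

def point_displ(pt1, pt2):
    # Flatten whatever shape arrives, then return a 3x1 column vector.
    pt1 = np.asarray(pt1, dtype=np.float64).reshape(-1)
    pt2 = np.asarray(pt2, dtype=np.float64).reshape(-1)
    return (pt2 - pt1).reshape(3, 1)

def point_dist(pt1, pt2):
    return spla.norm(point_displ(pt1, pt2))

print(point_dist([0, 0, 0], [3, 4, 0]))  # 5.0
```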
1,434
|
def point_rotate ( pt , ax , theta ) : import numpy as np pt = make_nd_vec ( pt , nd = 3 , t = np . float64 , norm = False ) rot_pt = np . dot ( mtx_rot ( ax , theta , reps = 1 ) , pt ) return rot_pt
|
Rotate a 3 - D point around a 3 - D axis through the origin .
|
1,435
|
def point_reflect ( pt , nv ) : import numpy as np from scipy import linalg as spla pt = make_nd_vec ( pt , nd = 3 , t = np . float64 , norm = False ) refl_pt = np . dot ( mtx_refl ( nv , reps = 1 ) , pt ) return refl_pt
|
Reflect a 3 - D point through a plane intersecting the origin .
|
1,436
|
def geom_reflect ( g , nv ) : import numpy as np g = make_nd_vec ( g , nd = None , t = np . float64 , norm = False ) refl_g = np . dot ( mtx_refl ( nv , reps = ( g . shape [ 0 ] // 3 ) ) , g ) . reshape ( ( g . shape [ 0 ] , 1 ) ) return refl_g
|
Reflection symmetry operation .
|
1,437
|
def geom_rotate ( g , ax , theta ) : import numpy as np g = make_nd_vec ( g , nd = None , t = np . float64 , norm = False ) rot_g = np . dot ( mtx_rot ( ax , theta , reps = ( g . shape [ 0 ] // 3 ) ) , g ) . reshape ( ( g . shape [ 0 ] , 1 ) ) return rot_g
|
Rotation symmetry operation .
|
1,438
|
def symm_op ( g , ax , theta , do_refl ) : import numpy as np gx = geom_rotate ( g , ax , theta ) if do_refl : gx = geom_reflect ( gx , ax ) return gx
|
Perform general point symmetry operation on a geometry .
|
1,439
|
def geom_find_rotsymm ( g , atwts , ax , improp , nmax = _DEF . SYMM_MATCH_NMAX , tol = _DEF . SYMM_MATCH_TOL ) : import numpy as np g = make_nd_vec ( g , nd = None , t = np . float64 , norm = False ) ax = make_nd_vec ( ax , nd = 3 , t = np . float64 , norm = True ) nval = nmax + 1 nfac = 1.0 while nfac > tol and nval > 0 : nval = nval - 1 try : nfac = geom_symm_match ( g , atwts , ax , 2 * np . pi / nval , improp ) except ZeroDivisionError as zde : if nval > 0 : raise zde return nval , nfac
|
Identify highest - order symmetry for a geometry on a given axis .
|
1,440
|
def g_subset ( g , atwts , atwt , digits = _DEF . SYMM_ATWT_ROUND_DIGITS ) : import numpy as np g = make_nd_vec ( g , nd = None , t = np . float64 , norm = False ) atwts = make_nd_vec ( atwts , nd = None , t = np . float64 , norm = False ) if not ( len ( g ) == 3 * len ( atwts ) ) : raise ValueError ( "Dim mismatch [len(g) != 3*len(ats)]." ) co = np . split ( g , g . shape [ 0 ] // 3 ) cf = [ c for ( c , a ) in zip ( co , atwts ) if np . round ( a , digits ) == np . round ( atwt , digits ) ] if not cf == [ ] : g_sub = np . concatenate ( cf , axis = 0 ) g_sub = g_sub . reshape ( ( g_sub . shape [ 0 ] , 1 ) ) else : g_sub = [ ] return g_sub
|
Extract a subset of a geometry matching a desired atom .
|
1,441
|
def mtx_refl ( nv , reps = 1 ) : import numpy as np from scipy import linalg as spla from . . const import PRM if spla . norm ( nv ) < PRM . ZERO_VEC_TOL : raise ValueError ( "Norm of 'nv' is too small." ) nv = make_nd_vec ( nv , nd = 3 , t = np . float64 , norm = True ) if not np . isscalar ( reps ) : raise ValueError ( "'reps' must be scalar." ) if not np . issubdtype ( type ( reps ) , int ) : raise ValueError ( "'reps' must be an integer." ) if not reps > 0 : raise ValueError ( "'reps' must be a positive integer." ) base_mtx = np . zeros ( shape = ( 3 , 3 ) , dtype = np . float64 ) for i in range ( 3 ) : for j in range ( i , 3 ) : if i == j : base_mtx [ i , j ] = 1 - 2 * nv [ i ] ** 2 else : base_mtx [ i , j ] = base_mtx [ j , i ] = - 2 * nv [ i ] * nv [ j ] refl_mtx = spla . block_diag ( * [ base_mtx for i in range ( reps ) ] ) return refl_mtx
|
Generate block - diagonal reflection matrix about nv .
|
1,442
|
def mtx_rot ( ax , theta , reps = 1 ) : import numpy as np from scipy import linalg as spla from . . const import PRM if spla . norm ( ax ) < PRM . ZERO_VEC_TOL : raise ValueError ( "Norm of 'ax' is too small." ) ax = make_nd_vec ( ax , nd = 3 , t = np . float64 , norm = True ) if not np . isscalar ( reps ) : raise ValueError ( "'reps' must be scalar." ) if not np . issubdtype ( type ( reps ) , int ) : raise ValueError ( "'reps' must be an integer." ) if not reps > 0 : raise ValueError ( "'reps' must be a positive integer." ) if not np . isscalar ( theta ) : raise ValueError ( "'theta' must be scalar." ) mod_lc = np . array ( [ [ 0 , - ax [ 2 ] , ax [ 1 ] ] , [ ax [ 2 ] , 0 , - ax [ 0 ] ] , [ - ax [ 1 ] , ax [ 0 ] , 0 ] ] , dtype = np . float64 ) ax_oprod = np . dot ( ax . reshape ( ( 3 , 1 ) ) , ax . reshape ( ( 1 , 3 ) ) ) base_mtx = np . add ( np . add ( ( 1.0 - np . cos ( theta ) ) * ax_oprod , np . cos ( theta ) * np . eye ( 3 ) ) , np . sin ( theta ) * mod_lc ) rot_mtx = spla . block_diag ( * [ base_mtx for i in range ( reps ) ] ) return rot_mtx
|
Generate block - diagonal rotation matrix about ax .
|
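The base matrix built in mtx_rot above is Rodrigues' rotation formula, R = (1 - cos theta) a a^T + cos theta I + sin theta [a]x, where [a]x is the cross-product matrix (mod_lc in the row). A quick single-block check:

```python
import numpy as np

def rot3(ax, theta):
    ax = np.asarray(ax, dtype=np.float64)
    ax = ax / np.linalg.norm(ax)
    # Cross-product (skew-symmetric) matrix, same layout as mod_lc in the row.
    k = np.array([[0.0, -ax[2], ax[1]],
                  [ax[2], 0.0, -ax[0]],
                  [-ax[1], ax[0], 0.0]])
    return ((1 - np.cos(theta)) * np.outer(ax, ax)
            + np.cos(theta) * np.eye(3)
            + np.sin(theta) * k)

# Rotating x-hat about z-hat by +90 degrees should give y-hat.
print(np.round(rot3([0, 0, 1], np.pi / 2) @ np.array([1.0, 0.0, 0.0]), 6))
# [0. 1. 0.]
```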
1,443
|
def ff ( items , targets ) : bins = [ ( target , [ ] ) for target in targets ] skip = [ ] for item in items : for target , content in bins : if item <= ( target - sum ( content ) ) : content . append ( item ) break else : skip . append ( item ) return bins , skip
|
First - Fit
|
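Usage of the first-fit row above: each item goes into the first bin with enough remaining capacity, and items that fit nowhere are skipped.

```python
def ff(items, targets):
    bins = [(target, []) for target in targets]
    skip = []
    for item in items:
        for target, content in bins:
            if item <= (target - sum(content)):  # remaining capacity
                content.append(item)
                break
        else:
            skip.append(item)
    return bins, skip

bins, skip = ff([4, 3, 2, 6], targets=[5, 5])
print(bins)  # [(5, [4]), (5, [3, 2])]
print(skip)  # [6]
```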
1,444
|
def ffd ( items , targets , ** kwargs ) : sizes = zip ( items , weight ( items , ** kwargs ) ) sizes = sorted ( sizes , key = operator . itemgetter ( 1 ) , reverse = True ) items = map ( operator . itemgetter ( 0 ) , sizes ) return ff ( items , targets )
|
First - Fit Decreasing
|
1,445
|
def mr ( items , targets , ** kwargs ) : bins = [ ( target , [ ] ) for target in targets ] skip = [ ] for item in items : capacities = [ target - sum ( content ) for target , content in bins ] weighted = weight ( capacities , ** kwargs ) ( target , content ) , capacity , _ = max ( zip ( bins , capacities , weighted ) , key = operator . itemgetter ( 2 ) ) if item <= capacity : content . append ( item ) else : skip . append ( item ) return bins , skip
|
Max - Rest
|
1,446
|
def bf ( items , targets , ** kwargs ) : bins = [ ( target , [ ] ) for target in targets ] skip = [ ] for item in items : containers = [ ] capacities = [ ] for target , content in bins : capacity = target - sum ( content ) if item <= capacity : containers . append ( content ) capacities . append ( capacity - item ) if len ( capacities ) : weighted = zip ( containers , weight ( capacities , ** kwargs ) ) content , _ = min ( weighted , key = operator . itemgetter ( 1 ) ) content . append ( item ) else : skip . append ( item ) return bins , skip
|
Best - Fit
|
1,447
|
def bfd ( items , targets , ** kwargs ) : sizes = zip ( items , weight ( items , ** kwargs ) ) sizes = sorted ( sizes , key = operator . itemgetter ( 1 ) , reverse = True ) items = map ( operator . itemgetter ( 0 ) , sizes ) return bf ( items , targets , ** kwargs )
|
Best - Fit Decreasing
|
1,448
|
def trim_sequences ( variant_sequence , reference_context ) : cdna_prefix = variant_sequence . prefix cdna_alt = variant_sequence . alt cdna_suffix = variant_sequence . suffix if reference_context . strand == "-" : cdna_prefix , cdna_alt , cdna_suffix = ( reverse_complement_dna ( cdna_suffix ) , reverse_complement_dna ( cdna_alt ) , reverse_complement_dna ( cdna_prefix ) ) reference_sequence_before_variant = reference_context . sequence_before_variant_locus reference_sequence_after_variant = reference_context . sequence_after_variant_locus if len ( reference_sequence_before_variant ) > len ( cdna_prefix ) : n_trimmed_from_reference = len ( reference_sequence_before_variant ) - len ( cdna_prefix ) n_trimmed_from_variant = 0 elif len ( reference_sequence_before_variant ) < len ( cdna_prefix ) : n_trimmed_from_variant = len ( cdna_prefix ) - len ( reference_sequence_before_variant ) n_trimmed_from_reference = 0 else : n_trimmed_from_variant = 0 n_trimmed_from_reference = 0 reference_sequence_before_variant = reference_sequence_before_variant [ n_trimmed_from_reference : ] cdna_prefix = cdna_prefix [ n_trimmed_from_variant : ] return ( cdna_prefix , cdna_alt , cdna_suffix , reference_sequence_before_variant , reference_sequence_after_variant , n_trimmed_from_reference )
|
A VariantSequence and a ReferenceContext may contain different numbers of nucleotides before the variant locus . Furthermore , the VariantSequence is always expressed in terms of the positive strand it aligned against , whereas reference transcripts may come from the negative strand of the genome . Take the reverse complement of the VariantSequence if the ReferenceContext is from negative-strand transcripts , and trim either sequence so that the prefixes have the same length .
|
1,449
|
def count_mismatches_before_variant ( reference_prefix , cdna_prefix ) : if len ( reference_prefix ) != len ( cdna_prefix ) : raise ValueError ( "Expected reference prefix '%s' to be same length as %s" % ( reference_prefix , cdna_prefix ) ) return sum ( xi != yi for ( xi , yi ) in zip ( reference_prefix , cdna_prefix ) )
|
Computes the number of mismatching nucleotides between two cDNA sequences before a variant locus .
|
1,450
|
def count_mismatches_after_variant ( reference_suffix , cdna_suffix ) : len_diff = len ( cdna_suffix ) - len ( reference_suffix ) return sum ( xi != yi for ( xi , yi ) in zip ( reference_suffix , cdna_suffix ) ) + max ( 0 , len_diff )
|
Computes the number of mismatching nucleotides between two cDNA sequences after a variant locus .
|
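In the row above, positional mismatches are counted over the zipped overlap, and any cDNA bases extending past the reference suffix also count as mismatches. Worked example:

```python
def count_mismatches_after_variant(reference_suffix, cdna_suffix):
    len_diff = len(cdna_suffix) - len(reference_suffix)
    return (sum(xi != yi for xi, yi in zip(reference_suffix, cdna_suffix))
            + max(0, len_diff))

# One point mismatch (C vs G) plus one cDNA base past the reference end.
print(count_mismatches_after_variant("ACGT", "AGGTC"))  # 2
```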
1,451
|
def compute_offset_to_first_complete_codon ( offset_to_first_complete_reference_codon , n_trimmed_from_reference_sequence ) : if n_trimmed_from_reference_sequence <= offset_to_first_complete_reference_codon : return ( offset_to_first_complete_reference_codon - n_trimmed_from_reference_sequence ) else : n_nucleotides_trimmed_after_first_codon = ( n_trimmed_from_reference_sequence - offset_to_first_complete_reference_codon ) frame = n_nucleotides_trimmed_after_first_codon % 3 return ( 3 - frame ) % 3
|
Once we've aligned the variant sequence to the ReferenceContext , we need to transfer the reading frame from the reference transcripts to the variant sequence .
|
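The arithmetic above: if trimming stops before the first complete reference codon, the codon offset simply shifts left; if trimming cuts past it, the next codon boundary is found modulo 3. Worked examples:

```python
def compute_offset_to_first_complete_codon(offset_to_first_complete_reference_codon,
                                           n_trimmed_from_reference_sequence):
    if n_trimmed_from_reference_sequence <= offset_to_first_complete_reference_codon:
        return (offset_to_first_complete_reference_codon
                - n_trimmed_from_reference_sequence)
    n_after_first_codon = (n_trimmed_from_reference_sequence
                           - offset_to_first_complete_reference_codon)
    frame = n_after_first_codon % 3
    return (3 - frame) % 3

print(compute_offset_to_first_complete_codon(2, 1))  # 1 (codon start shifts left)
# First codon started at offset 2; trimming 7 removes the codon start plus 5
# more bases, so the next codon boundary is (3 - 5 % 3) % 3 = 1 base in.
print(compute_offset_to_first_complete_codon(2, 7))  # 1
```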
1,452
|
def match_variant_sequence_to_reference_context ( variant_sequence , reference_context , min_transcript_prefix_length , max_transcript_mismatches , include_mismatches_after_variant = False , max_trimming_attempts = 2 ) : variant_sequence_in_reading_frame = None for i in range ( max_trimming_attempts + 1 ) : variant_sequence_too_short = ( ( reference_context . strand == "+" and len ( variant_sequence . prefix ) < min_transcript_prefix_length ) or ( reference_context . strand == "-" and len ( variant_sequence . suffix ) < min_transcript_prefix_length ) ) if variant_sequence_too_short : logger . info ( "Variant sequence %s shorter than min allowed %d (iter=%d)" , variant_sequence , min_transcript_prefix_length , i + 1 ) return None variant_sequence_in_reading_frame = VariantSequenceInReadingFrame . from_variant_sequence_and_reference_context ( variant_sequence = variant_sequence , reference_context = reference_context ) if variant_sequence_in_reading_frame is None : return None n_mismatch_before_variant = ( variant_sequence_in_reading_frame . number_mismatches_before_variant ) n_mismatch_after_variant = ( variant_sequence_in_reading_frame . number_mismatches_after_variant ) logger . info ( "Iter #%d/%d: %s" % ( i + 1 , max_trimming_attempts + 1 , variant_sequence_in_reading_frame ) ) total_mismatches = n_mismatch_before_variant if include_mismatches_after_variant : total_mismatches += n_mismatch_after_variant if total_mismatches <= max_transcript_mismatches : return variant_sequence_in_reading_frame logger . info ( ( "Too many mismatches (%d) between variant sequence %s and " "reference context %s (attempt=%d/%d)" ) , n_mismatch_before_variant , variant_sequence , reference_context , i + 1 , max_trimming_attempts + 1 ) current_min_coverage = variant_sequence . min_coverage ( ) logger . info ( "Trimming to subsequence covered by at least %d reads" , current_min_coverage + 1 ) variant_sequence = variant_sequence . trim_by_coverage ( current_min_coverage + 1 ) return None
|
Iteratively trim low - coverage subsequences of a variant sequence until it either matches the given reference context or there are too few nucleotides left in the variant sequence .
|
1,453
|
def _check_codons ( self ) : for stop_codon in self . stop_codons : if stop_codon in self . codon_table : if self . codon_table [ stop_codon ] != "*" : raise ValueError ( ( "Codon '%s' not found in stop_codons, but codon table " "indicates that it should be" ) % ( stop_codon , ) ) else : self . codon_table [ stop_codon ] = "*" for start_codon in self . start_codons : if start_codon not in self . codon_table : raise ValueError ( "Start codon '%s' missing from codon table" % ( start_codon , ) ) for codon , amino_acid in self . codon_table . items ( ) : if amino_acid == "*" and codon not in self . stop_codons : raise ValueError ( "Non-stop codon '%s' can't translate to '*'" % ( codon , ) ) if len ( self . codon_table ) != 64 : raise ValueError ( "Expected 64 codons but found %d in codon table" % ( len ( self . codon_table , ) ) )
|
If codon table is missing stop codons then add them .
|
1,454
|
def copy ( self , name , start_codons = None , stop_codons = None , codon_table = None , codon_table_changes = None ) : new_start_codons = ( self . start_codons . copy ( ) if start_codons is None else start_codons ) new_stop_codons = ( self . stop_codons . copy ( ) if stop_codons is None else stop_codons ) new_codon_table = ( self . codon_table . copy ( ) if codon_table is None else codon_table ) if codon_table_changes is not None : new_codon_table . update ( codon_table_changes ) return GeneticCode ( name = name , start_codons = new_start_codons , stop_codons = new_stop_codons , codon_table = new_codon_table )
|
Make copy of this GeneticCode object with optional replacement values for all fields .
|
1,455
|
def start ( self ) : self . running = True self . thread = threading . Thread ( target = self . _main_loop ) self . thread . start ( )
|
Start listening to changes
|
1,456
|
def subscribe ( self , field_names ) : available_controls = dict ( self . raildriver . get_controller_list ( ) ) . values ( ) for field in field_names : if field not in available_controls : raise ValueError ( 'Cannot subscribe to a missing controller {}' . format ( field ) ) self . subscribed_fields = field_names
|
Subscribe to given fields .
|
1,457
|
def set_matrix_dimensions ( self , bounds , xdensity , ydensity ) : self . bounds = bounds self . xdensity = xdensity self . ydensity = ydensity scs = SheetCoordinateSystem ( bounds , xdensity , ydensity ) for of in self . output_fns : if isinstance ( of , TransferFn ) : of . initialize ( SCS = scs , shape = scs . shape )
|
Change the dimensions of the matrix into which the pattern will be drawn . Users of this class should call this method rather than changing the bounds , xdensity , and ydensity parameters directly . Subclasses can override this method to update any internal data structures that may depend on the matrix dimensions .
|
1,458
|
def state_push ( self ) : "Save the state of the output functions, to be restored with state_pop." for of in self . output_fns : if hasattr ( of , 'state_push' ) : of . state_push ( ) super ( PatternGenerator , self ) . state_push ( )
|
Save the state of the output functions to be restored with state_pop .
|
1,459
|
def state_pop ( self ) : "Restore the state of the output functions saved by state_push." for of in self . output_fns : if hasattr ( of , 'state_pop' ) : of . state_pop ( ) super ( PatternGenerator , self ) . state_pop ( )
|
Restore the state of the output functions saved by state_push .
|
1,460
|
def pil ( self , ** params_to_override ) : from PIL . Image import fromarray nchans = self . num_channels ( ) if nchans in [ 0 , 1 ] : mode , arr = None , self ( ** params_to_override ) arr = ( 255.0 / arr . max ( ) * ( arr - arr . min ( ) ) ) . astype ( np . uint8 ) elif nchans in [ 3 , 4 ] : mode = 'RGB' if nchans == 3 else 'RGBA' arr = np . dstack ( list ( self . channels ( ** params_to_override ) . values ( ) ) [ 1 : ] ) arr = ( 255.0 * arr ) . astype ( np . uint8 ) else : raise ValueError ( "Unsupported number of channels" ) return fromarray ( arr , mode )
|
Returns a PIL image for this pattern overriding parameters if provided .
|
1,461
|
def state_push ( self ) : super ( Composite , self ) . state_push ( ) for gen in self . generators : gen . state_push ( )
|
Push the state of all generators
|
1,462
|
def state_pop ( self ) : super ( Composite , self ) . state_pop ( ) for gen in self . generators : gen . state_pop ( )
|
Pop the state of all generators
|
1,463
|
def function ( self , p ) : generators = self . _advance_pattern_generators ( p ) assert hasattr ( p . operator , 'reduce' ) , repr ( p . operator ) + " does not support 'reduce'." patterns = [ pg ( xdensity = p . xdensity , ydensity = p . ydensity , bounds = p . bounds , mask = p . mask , x = p . x + p . size * ( pg . x * np . cos ( p . orientation ) - pg . y * np . sin ( p . orientation ) ) , y = p . y + p . size * ( pg . x * np . sin ( p . orientation ) + pg . y * np . cos ( p . orientation ) ) , orientation = pg . orientation + p . orientation , size = pg . size * p . size ) for pg in generators ] image_array = p . operator . reduce ( patterns ) return image_array
|
Constructs combined pattern out of the individual ones .
|
1,464
|
def compile_column ( name : str , data_type : str , nullable : bool ) -> str : null_str = 'NULL' if nullable else 'NOT NULL' return '{name} {data_type} {null},' . format ( name = name , data_type = data_type , null = null_str )
|
Create column definition statement .
|
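Usage of the compile_column row above, including the trailing comma it emits for concatenation into a CREATE TABLE body:

```python
def compile_column(name: str, data_type: str, nullable: bool) -> str:
    null_str = 'NULL' if nullable else 'NOT NULL'
    return '{name} {data_type} {null},'.format(name=name, data_type=data_type,
                                               null=null_str)

print(compile_column('id', 'integer', nullable=False))  # id integer NOT NULL,
print(compile_column('note', 'text', nullable=True))    # note text NULL,
```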
1,465
|
def create ( self , no_data = False ) : if self . query : ddl_statement = self . compile_create_as ( ) else : ddl_statement = self . compile_create ( ) if no_data : ddl_statement += '\nWITH NO DATA' return ddl_statement , self . query_values
|
Declare a materialized view .
|
1,466
|
def predicted_effects_for_variant ( variant , transcript_id_whitelist = None , only_coding_changes = True ) : effects = [ ] for transcript in variant . transcripts : if only_coding_changes and not transcript . complete : logger . info ( "Skipping transcript %s for variant %s because it's incomplete" , transcript . name , variant ) continue if transcript_id_whitelist and transcript . id not in transcript_id_whitelist : logger . info ( "Skipping transcript %s for variant %s because it's not one of %d allowed" , transcript . name , variant , len ( transcript_id_whitelist ) ) continue effects . append ( variant . effect_on_transcript ( transcript ) ) effects = EffectCollection ( effects ) n_total_effects = len ( effects ) logger . info ( "Predicted total %d effects for variant %s" % ( n_total_effects , variant ) ) if not only_coding_changes : return effects else : nonsynonymous_coding_effects = effects . drop_silent_and_noncoding ( ) logger . info ( "Keeping %d/%d effects which affect protein coding sequence for %s: %s" , len ( nonsynonymous_coding_effects ) , n_total_effects , variant , nonsynonymous_coding_effects ) usable_effects = [ effect for effect in nonsynonymous_coding_effects if effect . mutant_protein_sequence is not None ] logger . info ( "Keeping %d effects with predictable AA sequences for %s: %s" , len ( usable_effects ) , variant , usable_effects ) return usable_effects
|
For a given variant return its set of predicted effects . Optionally filter to transcripts where this variant results in a non - synonymous change to the protein sequence .
|
1,467
|
def reference_transcripts_for_variant ( variant , transcript_id_whitelist = None , only_coding_changes = True ) : predicted_effects = predicted_effects_for_variant ( variant = variant , transcript_id_whitelist = transcript_id_whitelist , only_coding_changes = only_coding_changes ) return [ effect . transcript for effect in predicted_effects ]
|
For a given variant find all the transcripts which overlap the variant and for which it has a predictable effect on the amino acid sequence of the protein .
|
1,468
|
def pileup_reads_at_position ( samfile , chromosome , base0_position ) : for column in samfile . pileup ( chromosome , start = base0_position , end = base0_position + 1 ) : if column . pos != base0_position : continue return column . pileups return [ ]
|
Returns a pileup column at the specified position . Unclear if a function like this is hiding somewhere in pysam API .
|
1,469
|
def locus_read_generator ( samfile , chromosome , base1_position_before_variant , base1_position_after_variant , use_duplicate_reads = USE_DUPLICATE_READS , use_secondary_alignments = USE_SECONDARY_ALIGNMENTS , min_mapping_quality = MIN_READ_MAPPING_QUALITY ) : logger . debug ( "Gathering reads at locus %s: %d-%d" , chromosome , base1_position_before_variant , base1_position_after_variant ) base0_position_before_variant = base1_position_before_variant - 1 base0_position_after_variant = base1_position_after_variant - 1 count = 0 for pileup_element in pileup_reads_at_position ( samfile = samfile , chromosome = chromosome , base0_position = base0_position_before_variant ) : read = LocusRead . from_pysam_pileup_element ( pileup_element , base0_position_before_variant = base0_position_before_variant , base0_position_after_variant = base0_position_after_variant , use_secondary_alignments = use_secondary_alignments , use_duplicate_reads = use_duplicate_reads , min_mapping_quality = min_mapping_quality ) if read is not None : count += 1 yield read logger . info ( "Found %d reads overlapping locus %s: %d-%d" , count , chromosome , base1_position_before_variant , base1_position_after_variant )
|
Generator that yields a sequence of ReadAtLocus records for reads which contain the positions before and after a variant . The actual work to figure out if what s between those positions matches a variant happens later in the variant_reads module .
|
1,470
|
def locus_reads_dataframe ( * args , ** kwargs ) : df_builder = DataFrameBuilder ( LocusRead , variant_columns = False , converters = { "reference_positions" : list_to_string , "quality_scores" : list_to_string , } ) for locus_read in locus_read_generator ( * args , ** kwargs ) : df_builder . add ( variant = None , element = locus_read ) return df_builder . to_dataframe ( )
|
Traverse a BAM file to find all the reads overlapping a specified locus .
|
1,471
|
def copy_from_csv_sql ( qualified_name : str , delimiter = ',' , encoding = 'utf8' , null_str = '' , header = True , escape_str = '\\' , quote_char = '"' , force_not_null = None , force_null = None ) : options = [ ] options . append ( "DELIMITER '%s'" % delimiter ) options . append ( "NULL '%s'" % null_str ) if header : options . append ( 'HEADER' ) options . append ( "QUOTE '%s'" % quote_char ) options . append ( "ESCAPE '%s'" % escape_str ) if force_not_null : options . append ( _format_force_not_null ( column_names = force_not_null ) ) if force_null : options . append ( _format_force_null ( column_names = force_null ) ) postgres_encoding = get_postgres_encoding ( encoding ) options . append ( "ENCODING '%s'" % postgres_encoding ) copy_sql = _format_copy_csv_sql ( qualified_name , copy_options = options ) return copy_sql
|
Generate copy from csv statement .
|
1,472
|
def sort_protein_sequences ( protein_sequences ) : return list ( sorted ( protein_sequences , key = ProteinSequence . ascending_sort_key , reverse = True ) )
|
Sort protein sequences in decreasing order of priority
|
1,473
|
def reads_generator_to_protein_sequences_generator ( variant_and_overlapping_reads_generator , transcript_id_whitelist = None , protein_sequence_length = PROTEIN_SEQUENCE_LENGTH , min_alt_rna_reads = MIN_ALT_RNA_READS , min_variant_sequence_coverage = MIN_VARIANT_SEQUENCE_COVERAGE , min_transcript_prefix_length = MIN_TRANSCRIPT_PREFIX_LENGTH , max_transcript_mismatches = MAX_REFERENCE_TRANSCRIPT_MISMATCHES , include_mismatches_after_variant = INCLUDE_MISMATCHES_AFTER_VARIANT , max_protein_sequences_per_variant = MAX_PROTEIN_SEQUENCES_PER_VARIANT , variant_sequence_assembly = VARIANT_SEQUENCE_ASSEMBLY ) : for ( variant , overlapping_reads ) in variant_and_overlapping_reads_generator : overlapping_transcript_ids = [ t . id for t in variant . transcripts if t . is_protein_coding ] _ , ref , alt = trim_variant ( variant ) overlapping_reads = list ( overlapping_reads ) reads_grouped_by_allele = group_reads_by_allele ( overlapping_reads ) ref_reads = reads_grouped_by_allele . get ( ref , [ ] ) alt_reads = reads_grouped_by_allele . get ( alt , [ ] ) translations = translate_variant_reads ( variant = variant , variant_reads = alt_reads , transcript_id_whitelist = transcript_id_whitelist , protein_sequence_length = protein_sequence_length , min_alt_rna_reads = min_alt_rna_reads , min_variant_sequence_coverage = min_variant_sequence_coverage , min_transcript_prefix_length = min_transcript_prefix_length , max_transcript_mismatches = max_transcript_mismatches , include_mismatches_after_variant = include_mismatches_after_variant , variant_sequence_assembly = variant_sequence_assembly ) protein_sequences = [ ] for ( key , equivalent_translations ) in groupby ( translations , key_fn = Translation . as_translation_key ) . items ( ) : alt_reads_supporting_protein_sequence , group_transcript_ids , group_gene_names = ProteinSequence . _summarize_translations ( equivalent_translations ) logger . info ( "%s: %s alt reads supporting protein sequence (gene names = %s)" , key , len ( alt_reads_supporting_protein_sequence ) , group_gene_names ) protein_sequence = ProteinSequence . from_translation_key ( translation_key = key , translations = equivalent_translations , overlapping_reads = overlapping_reads , alt_reads = alt_reads , ref_reads = ref_reads , alt_reads_supporting_protein_sequence = alt_reads_supporting_protein_sequence , transcripts_supporting_protein_sequence = group_transcript_ids , transcripts_overlapping_variant = overlapping_transcript_ids , gene = list ( group_gene_names ) ) logger . info ( "%s: protein sequence = %s" % ( key , protein_sequence . amino_acids ) ) protein_sequences . append ( protein_sequence ) protein_sequences = sort_protein_sequences ( protein_sequences ) yield variant , protein_sequences [ : max_protein_sequences_per_variant ]
|
Translates each coding variant in a collection to one or more Translation objects which are then aggregated into equivalent ProteinSequence objects .
|
1,474
|
def from_translation_key ( cls , translation_key , translations , overlapping_reads , ref_reads , alt_reads , alt_reads_supporting_protein_sequence , transcripts_overlapping_variant , transcripts_supporting_protein_sequence , gene ) : return cls ( amino_acids = translation_key . amino_acids , variant_aa_interval_start = translation_key . variant_aa_interval_start , variant_aa_interval_end = translation_key . variant_aa_interval_end , ends_with_stop_codon = translation_key . ends_with_stop_codon , frameshift = translation_key . frameshift , translations = translations , overlapping_reads = overlapping_reads , ref_reads = ref_reads , alt_reads = alt_reads , alt_reads_supporting_protein_sequence = ( alt_reads_supporting_protein_sequence ) , transcripts_overlapping_variant = transcripts_overlapping_variant , transcripts_supporting_protein_sequence = ( transcripts_supporting_protein_sequence ) , gene = gene )
|
Create a ProteinSequence object from a TranslationKey along with all the extra fields a ProteinSequence requires .
|
1,475
|
def make_delete_table ( table : Table , delete_prefix = 'delete_from__' ) -> Table : name = delete_prefix + table . name primary_key = table . primary_key key_names = set ( primary_key . column_names ) columns = [ column for column in table . columns if column . name in key_names ] table = Table ( name , columns , primary_key ) return table
|
Build the companion table ( primary - key columns only ) used for a DELETE FROM ... USING primary - key join .
|
1,476
|
def trim_variant_fields ( location , ref , alt ) : if len ( alt ) > 0 and ref . startswith ( alt ) : ref = ref [ len ( alt ) : ] location += len ( alt ) alt = "" if len ( ref ) > 0 and alt . startswith ( ref ) : alt = alt [ len ( ref ) : ] location += len ( ref ) - 1 ref = "" return location , ref , alt
|
Trims common prefixes from the ref and alt sequences
|
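A worked example of the trimming above: a deletion written as ref="ATG", alt="A" shares the prefix "A", which is trimmed away, leaving a pure deletion of "TG" one base downstream.

```python
def trim_variant_fields(location, ref, alt):
    if len(alt) > 0 and ref.startswith(alt):
        ref = ref[len(alt):]      # drop the shared prefix from ref
        location += len(alt)
        alt = ""
    if len(ref) > 0 and alt.startswith(ref):
        alt = alt[len(ref):]      # symmetric case: pure insertion
        location += len(ref) - 1
        ref = ""
    return location, ref, alt

print(trim_variant_fields(100, "ATG", "A"))  # (101, 'TG', '')
```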
1,477
|
def base0_interval_for_variant ( variant ) : base1_location , ref , alt = trim_variant ( variant ) return base0_interval_for_variant_fields ( base1_location = base1_location , ref = ref , alt = alt )
|
Interval of interbase offsets of the affected reference positions for a particular variant .
|
1,478
|
def interbase_range_affected_by_variant_on_transcript ( variant , transcript ) : if variant . is_insertion : if transcript . strand == "+" : start_offset = transcript . spliced_offset ( variant . start ) + 1 else : start_offset = transcript . spliced_offset ( variant . start ) end_offset = start_offset else : offsets = [ ] assert len ( variant . ref ) > 0 for dna_pos in range ( variant . start , variant . start + len ( variant . ref ) ) : try : offsets . append ( transcript . spliced_offset ( dna_pos ) ) except ValueError : logger . info ( "Couldn't find position %d from %s on exons of %s" , dna_pos , variant , transcript ) if len ( offsets ) == 0 : raise ValueError ( "Couldn't find any exonic reference bases affected by %s on %s" , variant , transcript ) start_offset = min ( offsets ) end_offset = max ( offsets ) + 1 return ( start_offset , end_offset )
|
Convert from a variant s position in global genomic coordinates on the forward strand to an interval of interbase offsets on a particular transcript s mRNA .
|
1,479
|
def insert ( conn , qualified_name : str , column_names , records ) : query = create_insert_statement ( qualified_name , column_names ) with conn : with conn . cursor ( cursor_factory = NamedTupleCursor ) as cursor : for record in records : cursor . execute ( query , record )
|
Insert a collection of namedtuple records .
|
1,480
|
def insert_many ( conn , tablename , column_names , records , chunksize = 2500 ) : groups = chunks ( records , chunksize ) column_str = ',' . join ( column_names ) insert_template = 'INSERT INTO {table} ({columns}) VALUES {values}' . format ( table = tablename , columns = column_str , values = '{0}' ) with conn : with conn . cursor ( ) as cursor : for recs in groups : record_group = list ( recs ) records_template_str = ',' . join ( [ '%s' ] * len ( record_group ) ) insert_query = insert_template . format ( records_template_str ) cursor . execute ( insert_query , record_group )
|
Insert many records by chunking data into insert statements .
|
1,481
|
def upsert_records ( conn , records , upsert_statement ) : with conn : with conn . cursor ( ) as cursor : for record in records : cursor . execute ( upsert_statement , record )
|
Upsert records .
|
1,482
|
def delete_joined_table_sql ( qualified_name , removing_qualified_name , primary_key ) : condition_template = 't.{}=d.{}' where_clause = ' AND ' . join ( condition_template . format ( pkey , pkey ) for pkey in primary_key ) delete_statement = ( 'DELETE FROM {table} t' ' USING {delete_table} d' ' WHERE {where_clause}' ) . format ( table = qualified_name , delete_table = removing_qualified_name , where_clause = where_clause ) return delete_statement
|
Generate the SQL statement for a joined DELETE FROM : delete from the table referenced by qualified_name the intersection of rows it shares with removing_qualified_name , joined on the primary key .
|
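The generated statement from the row above, using hypothetical table and key names for illustration:

```python
def delete_joined_table_sql(qualified_name, removing_qualified_name, primary_key):
    # One t.<pkey>=d.<pkey> condition per primary-key column.
    where_clause = ' AND '.join('t.{}=d.{}'.format(pkey, pkey)
                                for pkey in primary_key)
    return ('DELETE FROM {table} t'
            ' USING {delete_table} d'
            ' WHERE {where_clause}').format(table=qualified_name,
                                            delete_table=removing_qualified_name,
                                            where_clause=where_clause)

print(delete_joined_table_sql('public.events', 'public.delete_from__events',
                              primary_key=['event_id', 'source']))
# DELETE FROM public.events t USING public.delete_from__events d
#   WHERE t.event_id=d.event_id AND t.source=d.source
```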
1,483
|
def copy_from_csv ( conn , file , qualified_name : str , delimiter = ',' , encoding = 'utf8' , null_str = '' , header = True , escape_str = '\\' , quote_char = '"' , force_not_null = None , force_null = None ) : copy_sql = copy_from_csv_sql ( qualified_name , delimiter , encoding , null_str = null_str , header = header , escape_str = escape_str , quote_char = quote_char , force_not_null = force_not_null , force_null = force_null ) with conn : with conn . cursor ( ) as cursor : cursor . copy_expert ( copy_sql , file )
|
Copy file - like object to database table .
|
1,484
|
def get_user_tables ( conn ) : query_string = "select schemaname, relname from pg_stat_user_tables;" with conn . cursor ( ) as cursor : cursor . execute ( query_string ) tables = cursor . fetchall ( ) return tables
|
Retrieve all user tables .
|
1,485
|
def get_column_metadata ( conn , table : str , schema = 'public' ) : query = qualified_name = compile_qualified_name ( table , schema = schema ) for record in select_dict ( conn , query , params = ( qualified_name , ) ) : yield record
|
Returns column data following db . Column parameter specification .
|
1,486
|
def reflect_table ( conn , table_name , schema = 'public' ) : column_meta = list ( get_column_metadata ( conn , table_name , schema = schema ) ) primary_key_columns = list ( get_primary_keys ( conn , table_name , schema = schema ) ) columns = [ Column ( ** column_data ) for column_data in column_meta ] primary_key = PrimaryKey ( primary_key_columns ) return Table ( table_name , columns , primary_key , schema = schema )
|
Reflect basic table attributes .
|
1,487
|
def reset ( db_name ) : conn = psycopg2 . connect ( database = 'postgres' ) db = Database ( db_name ) conn . autocommit = True with conn . cursor ( ) as cursor : cursor . execute ( db . drop_statement ( ) ) cursor . execute ( db . create_statement ( ) ) conn . close ( )
|
Reset database .
|
1,488
|
def install_extensions ( extensions , ** connection_parameters ) : from postpy . connections import connect conn = connect ( ** connection_parameters ) conn . autocommit = True for extension in extensions : install_extension ( conn , extension )
|
Install Postgres extensions if available .
|
1,489
|
def update ( self , status ) : logging . info ( 'Executor sends status update {} for task {}' . format ( status . state , status . task_id ) ) return self . driver . sendStatusUpdate ( encode ( status ) )
|
Sends a status update to the framework scheduler .
|
1,490
|
def message ( self , data ) : logging . info ( 'Driver sends framework message {}' . format ( data ) ) return self . driver . sendFrameworkMessage ( data )
|
Sends a message to the framework scheduler .
|
1,491
|
def get_current_time ( self ) : hms = [ int ( self . get_current_controller_value ( i ) ) for i in range ( 406 , 409 ) ] return datetime . time ( * hms )
|
Get current time
|
1,492
|
def get_loco_name ( self ) : ret_str = self . dll . GetLocoName ( ) . decode ( ) if not ret_str : return return ret_str . split ( '.:.' )
|
Returns the Provider , Product , and Engine name .
|
1,493
|
def set_controller_value ( self , index_or_name , value ) : if not isinstance ( index_or_name , int ) : index = self . get_controller_index ( index_or_name ) else : index = index_or_name self . dll . SetControllerValue ( index , ctypes . c_float ( value ) )
|
Sets controller value
|
1,494
|
def stop ( self , failover = False ) : logging . info ( 'Stops Scheduler Driver' ) return self . driver . stop ( failover )
|
Stops the scheduler driver .
|
1,495
|
def request ( self , requests ) : logging . info ( 'Request resources from Mesos' ) return self . driver . requestResources ( map ( encode , requests ) )
|
Requests resources from Mesos .
|
1,496
|
def launch ( self , offer_id , tasks , filters = Filters ( ) ) : logging . info ( 'Launches tasks {}' . format ( tasks ) ) return self . driver . launchTasks ( encode ( offer_id ) , map ( encode , tasks ) , encode ( filters ) )
|
Launches the given set of tasks .
|
1,497
|
def kill ( self , task_id ) : logging . info ( 'Kills task {}' . format ( task_id ) ) return self . driver . killTask ( encode ( task_id ) )
|
Kills the specified task .
|
1,498
|
def reconcile ( self , statuses ) : logging . info ( 'Reconciles task statuses {}' . format ( statuses ) ) return self . driver . reconcileTasks ( map ( encode , statuses ) )
|
Allows the framework to query the status for non - terminal tasks .
|
1,499
|
def accept ( self , offer_ids , operations , filters = Filters ( ) ) : logging . info ( 'Accepts offers {}' . format ( offer_ids ) ) return self . driver . acceptOffers ( map ( encode , offer_ids ) , map ( encode , operations ) , encode ( filters ) )
|
Accepts the given offers and performs a sequence of operations on those accepted offers .
|